diff options
631 files changed, 28752 insertions, 9856 deletions
diff --git a/.bzrignore b/.bzrignore index f3eea45467b..df903d582df 100644 --- a/.bzrignore +++ b/.bzrignore @@ -1470,3 +1470,4 @@ libmysql/libmysql_versions.ld scripts/mysql_config.pl pcre/pcre_chartables.c pcre/test*grep +import_executables.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index 3aafc852f16..c2e74eb4951 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -26,6 +26,14 @@ IF(POLICY CMP0022) CMAKE_POLICY(SET CMP0022 OLD) ENDIF() +# We use the LOCATION target property (CMP0026) +# and get_target_property() for non-existent targets (CMP0045) +IF(CMAKE_VERSION VERSION_EQUAL "3.0.0" OR + CMAKE_VERSION VERSION_GREATER "3.0.0") + CMAKE_POLICY(SET CMP0026 OLD) + CMAKE_POLICY(SET CMP0045 OLD) +ENDIF() + MESSAGE(STATUS "Running cmake version ${CMAKE_VERSION}") SET(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_SOURCE_DIR}/cmake) @@ -268,8 +276,10 @@ IF (ENABLE_GCOV AND NOT WIN32 AND NOT APPLE) ENDIF() MY_CHECK_C_COMPILER_FLAG(-ggdb3 HAVE_GGDB3) -SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -ggdb3") -SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -ggdb3") +IF(HAVE_GGDB3) + SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -ggdb3") + SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -ggdb3") +ENDIF(HAVE_GGDB3) OPTION(ENABLED_LOCAL_INFILE "If we should should enable LOAD DATA LOCAL by default" ${IF_WIN}) @@ -310,16 +320,11 @@ ENDIF() # safemalloc can be enabled and disabled independently SET(WITH_SAFEMALLOC "AUTO" CACHE STRING "Use safemalloc memory debugger. Will result in slower execution. 
Options are: ON OFF AUTO.") -# force -DUSE_MYSYS_NEW unless already done by HAVE_CXX_NEW -IF(HAVE_CXX_NEW) - SET(DUSE_MYSYS_NEW "-DUSE_MYSYS_NEW") -ENDIF() - IF(WITH_SAFEMALLOC MATCHES "ON") - ADD_DEFINITIONS( -DSAFEMALLOC ${DUSE_MYSYS_NEW}) + ADD_DEFINITIONS( -DSAFEMALLOC) ELSEIF(WITH_SAFEMALLOC MATCHES "AUTO" AND NOT WIN32 AND NOT WITH_VALGRIND) SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC") - SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC ${DUSE_MYSYS_NEW}") + SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC") ENDIF() # Set commonly used variables @@ -357,6 +362,11 @@ ENDIF() # Run platform tests INCLUDE(configure.cmake) +# force -DUSE_MYSYS_NEW unless already done by HAVE_CXX_NEW +IF(NOT HAVE_CXX_NEW) + ADD_DEFINITIONS(-DUSE_MYSYS_NEW) +ENDIF() + # Find header files from the bundled libraries # (jemalloc, yassl, readline, pcre, etc) # before the ones installed in the system @@ -393,6 +403,11 @@ ELSEIF(MYSQL_MAINTAINER_MODE MATCHES "AUTO") SET(CMAKE_CXX_FLAGS_DEBUG "${MY_MAINTAINER_CXX_WARNINGS} ${CMAKE_CXX_FLAGS_DEBUG}") ENDIF() +IF(CMAKE_CROSSCOMPILING) + SET(IMPORT_EXECUTABLES "IMPORTFILE-NOTFOUND" CACHE FILEPATH "Path to import_executables.cmake from a native build") + INCLUDE(${IMPORT_EXECUTABLES}) +ENDIF() + IF(WITH_UNIT_TESTS) ENABLE_TESTING() ADD_SUBDIRECTORY(unittest/mytap) @@ -444,7 +459,6 @@ IF(NOT WITHOUT_SERVER) IF(EXISTS ${CMAKE_SOURCE_DIR}/internal/CMakeLists.txt) ADD_SUBDIRECTORY(internal) ENDIF() - ADD_SUBDIRECTORY(packaging/rpm-uln) ADD_SUBDIRECTORY(packaging/rpm-oel) ENDIF() @@ -461,6 +475,13 @@ IF(WIN32) ENDIF() ADD_SUBDIRECTORY(packaging/solaris) +IF(NOT CMAKE_CROSSCOMPILING) + SET(EXPORTED comp_err comp_sql factorial gen_lex_hash) + # minimal target to build only binaries for export + ADD_CUSTOM_TARGET(import_executables DEPENDS ${EXPORTED}) + EXPORT(TARGETS ${EXPORTED} FILE ${CMAKE_BINARY_DIR}/import_executables.cmake) +ENDIF() + CONFIGURE_FILE(config.h.cmake 
${CMAKE_BINARY_DIR}/include/my_config.h) CONFIGURE_FILE(config.h.cmake ${CMAKE_BINARY_DIR}/include/config.h) CONFIGURE_FILE(${CMAKE_SOURCE_DIR}/include/mysql_version.h.in @@ -1,7 +1,3 @@ -# Version number for MariaDB is maintained here. -# The version string is created from: -# MYSQL_VERSION_MAJOR.MYSQL_VERSION_MINOR.MYSQL_VERSION_PATCH-MYSQL_VERSION_EXTRA -# MYSQL_VERSION_MAJOR=10 MYSQL_VERSION_MINOR=0 MYSQL_VERSION_PATCH=13 diff --git a/client/client_priv.h b/client/client_priv.h index ef93818829e..67a6e822ea3 100644 --- a/client/client_priv.h +++ b/client/client_priv.h @@ -92,6 +92,7 @@ enum options_client OPT_REPORT_PROGRESS, OPT_SKIP_ANNOTATE_ROWS_EVENTS, OPT_SSL_CRL, OPT_SSL_CRLPATH, + OPT_USE_GTID, OPT_GALERA_SST_MODE, OPT_MAX_CLIENT_OPTION /* should be always the last */ }; diff --git a/client/mysql_upgrade.c b/client/mysql_upgrade.c index bf33397efdb..9caeb37f61b 100644 --- a/client/mysql_upgrade.c +++ b/client/mysql_upgrade.c @@ -206,12 +206,12 @@ static void die(const char *fmt, ...) } -static void verbose(const char *fmt, ...) +static int verbose(const char *fmt, ...) { va_list args; if (opt_silent) - return; + return 0; /* Print the verbose message */ va_start(args, fmt); @@ -222,6 +222,7 @@ static void verbose(const char *fmt, ...) fflush(stdout); } va_end(args); + return 0; } @@ -741,20 +742,19 @@ static void print_conn_args(const char *tool_name) in the server using "mysqlcheck --check-upgrade .." */ -static int run_mysqlcheck_upgrade(void) +static int run_mysqlcheck_upgrade(const char *arg1, const char *arg2) { - verbose("Phase 2/3: Checking and upgrading tables"); print_conn_args("mysqlcheck"); return run_tool(mysqlcheck_path, NULL, /* Send output from mysqlcheck directly to screen */ "--no-defaults", ds_args.str, "--check-upgrade", - "--all-databases", "--auto-repair", !opt_silent || opt_verbose ? "--verbose": "", opt_silent ? "--silent": "", opt_write_binlog ? 
"--write-binlog" : "--skip-write-binlog", + arg1, arg2, "2>&1", NULL); } @@ -762,7 +762,7 @@ static int run_mysqlcheck_upgrade(void) static int run_mysqlcheck_fixnames(void) { - verbose("Phase 1/3: Fixing table and database names"); + verbose("Phase 2/3: Fixing table and database names"); print_conn_args("mysqlcheck"); return run_tool(mysqlcheck_path, NULL, /* Send output from mysqlcheck directly to screen */ @@ -848,7 +848,6 @@ static int run_sql_fix_privilege_tables(void) if (init_dynamic_string(&ds_result, "", 512, 512)) die("Out of memory"); - verbose("Phase 3/3: Running 'mysql_fix_privilege_tables'..."); /* Individual queries can not be executed independently by invoking a forked mysql client, because the script uses session variables @@ -995,16 +994,12 @@ int main(int argc, char **argv) /* Find mysql */ find_tool(mysql_path, IF_WIN("mysql.exe", "mysql"), self_name); - if (!opt_systables_only) - { - /* Find mysqlcheck */ - find_tool(mysqlcheck_path, IF_WIN("mysqlcheck.exe", "mysqlcheck"), self_name); - } - else - { - if (!opt_silent) - printf("The --upgrade-system-tables option was used, databases won't be touched.\n"); - } + /* Find mysqlcheck */ + find_tool(mysqlcheck_path, IF_WIN("mysqlcheck.exe", "mysqlcheck"), self_name); + + if (opt_systables_only && !opt_silent) + printf("The --upgrade-system-tables option was used, user tables won't be touched.\n"); + /* Read the mysql_upgrade_info file to check if mysql_upgrade @@ -1024,16 +1019,16 @@ int main(int argc, char **argv) /* Run "mysqlcheck" and "mysql_fix_privilege_tables.sql" */ - if ((!opt_systables_only && - (run_mysqlcheck_fixnames() || run_mysqlcheck_upgrade())) || + verbose("Phase 1/3: Running 'mysql_fix_privilege_tables'..."); + if (run_mysqlcheck_upgrade("--databases", "mysql") || run_sql_fix_privilege_tables()) - { - /* - The upgrade failed to complete in some way or another, - significant error message should have been printed to the screen - */ die("Upgrade failed" ); - } + if 
(!opt_systables_only && + (run_mysqlcheck_fixnames() || + verbose("Phase 3/3: Checking and upgrading tables") || + run_mysqlcheck_upgrade("--all-databases","--skip-database=mysql"))) + die("Upgrade failed" ); + verbose("OK"); /* Create a file indicating upgrade has been performed */ diff --git a/client/mysqladmin.cc b/client/mysqladmin.cc index bd5d2eac4e5..88016d9563d 100644 --- a/client/mysqladmin.cc +++ b/client/mysqladmin.cc @@ -1,6 +1,6 @@ /* - Copyright (c) 2000, 2012, Oracle and/or its affiliates. - Copyright (c) 2010, 2012, Monty Program Ab. + Copyright (c) 2000, 2014, Oracle and/or its affiliates. + Copyright (c) 2010, 2014, Monty Program Ab. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -71,6 +71,7 @@ extern "C" my_bool get_one_option(int optid, const struct my_option *opt, char *argument); static my_bool sql_connect(MYSQL *mysql, uint wait); static int execute_commands(MYSQL *mysql,int argc, char **argv); +static char **mask_password(int argc, char ***argv); static int drop_db(MYSQL *mysql,const char *db); extern "C" sig_handler endprog(int signal_number); static void nice_time(ulong sec,char *buff); @@ -306,9 +307,9 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), int main(int argc,char *argv[]) { - int error= 0; + int error= 0, temp_argc; MYSQL mysql; - char **commands, **save_argv; + char **commands, **save_argv, **temp_argv; MY_INIT(argv[0]); mysql_init(&mysql); @@ -316,8 +317,12 @@ int main(int argc,char *argv[]) if ((error= load_defaults("my",load_default_groups,&argc,&argv))) goto err1; save_argv = argv; /* Save for free_defaults */ + if ((error=handle_options(&argc, &argv, my_long_options, get_one_option))) goto err2; + temp_argv= mask_password(argc, &argv); + temp_argc= argc; + if (debug_info_flag) my_end_arg= MY_CHECK_ERROR | MY_GIVE_INFO; if (debug_check_flag) @@ -328,7 +333,7 @@ int main(int argc,char *argv[]) 
usage(); exit(1); } - commands = argv; + commands = temp_argv; if (tty_password) opt_password = get_tty_password(NullS); @@ -475,6 +480,13 @@ int main(int argc,char *argv[]) } /* got connection */ mysql_close(&mysql); + temp_argc--; + while(temp_argc >= 0) + { + my_free(temp_argv[temp_argc]); + temp_argc--; + } + my_free(temp_argv); err2: mysql_library_end(); my_free(opt_password); @@ -1216,6 +1228,47 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) return 0; } +/** + @brief Masking the password if it is passed as command line argument. + + @details It works in Linux and changes cmdline in ps and /proc/pid/cmdline, + but it won't work for history file of shell. + The command line arguments are copied to another array and the + password in the argv is masked. This function is called just after + "handle_options" because in "handle_options", the agrv pointers + are altered which makes freeing of dynamically allocated memory + difficult. The password masking is done before all other operations + in order to minimise the time frame of password visibility via cmdline. 
+ + @param argc command line options (count) + @param argv command line options (values) + + @return temp_argv copy of argv +*/ + +static char **mask_password(int argc, char ***argv) +{ + char **temp_argv; + temp_argv= (char **)(my_malloc(sizeof(char *) * argc, MYF(MY_WME))); + argc--; + while (argc > 0) + { + temp_argv[argc]= my_strdup((*argv)[argc], MYF(MY_FAE)); + if (find_type((*argv)[argc - 1],&command_typelib, FIND_TYPE_BASIC) == ADMIN_PASSWORD || + find_type((*argv)[argc - 1],&command_typelib, FIND_TYPE_BASIC) == ADMIN_OLD_PASSWORD) + { + char *start= (*argv)[argc]; + while (*start) + *start++= 'x'; + start= (*argv)[argc]; + if (*start) + start[1]= 0; /* Cut length of argument */ + } + argc--; + } + temp_argv[argc]= my_strdup((*argv)[argc], MYF(MY_FAE)); + return(temp_argv); +} static void print_version(void) { diff --git a/client/mysqlcheck.c b/client/mysqlcheck.c index f7ae0783e5e..5a1dffd4014 100644 --- a/client/mysqlcheck.c +++ b/client/mysqlcheck.c @@ -51,6 +51,7 @@ static char *opt_password = 0, *current_user = 0, *default_charset= 0, *current_host= 0; static char *opt_plugin_dir= 0, *opt_default_auth= 0; static int first_error = 0; +static char *opt_skip_database; DYNAMIC_ARRAY tables4repair, tables4rebuild, alter_table_cmds; static char *shared_memory_base_name=0; static uint opt_protocol=0; @@ -178,6 +179,9 @@ static struct my_option my_long_options[] = #endif {"silent", 's', "Print only error messages.", &opt_silent, &opt_silent, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"skip_database", 0, "Don't process the database specified as argument", + &opt_skip_database, &opt_skip_database, 0, GET_STR, REQUIRED_ARG, + 0, 0, 0, 0, 0, 0}, {"socket", 'S', "The socket file to use for connection.", &opt_mysql_unix_port, &opt_mysql_unix_port, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, @@ -246,6 +250,9 @@ static void usage(void) puts("mysqlrepair: The default option will be -r"); puts("mysqlanalyze: The default option will be -a"); puts("mysqloptimize: The 
default option will be -o\n"); + printf("Usage: %s [OPTIONS] database [tables]\n", my_progname); + printf("OR %s [OPTIONS] --databases DB1 [DB2 DB3...]\n", + my_progname); puts("Please consult the MariaDB/MySQL knowledgebase at"); puts("http://kb.askmonty.org/v/mysqlcheck for latest information about"); puts("this program."); @@ -696,6 +703,9 @@ static int process_one_db(char *database) { DBUG_ENTER("process_one_db"); + if (opt_skip_database && !strcmp(database, opt_skip_database)) + DBUG_RETURN(0); + if (verbose) puts(database); if (what_to_do == DO_UPGRADE) diff --git a/client/mysqldump.c b/client/mysqldump.c index eba5bd93671..3adbe87a8bb 100644 --- a/client/mysqldump.c +++ b/client/mysqldump.c @@ -87,6 +87,9 @@ /* Chars needed to store LONGLONG, excluding trailing '\0'. */ #define LONGLONG_LEN 20 +/* Max length GTID position that we will output. */ +#define MAX_GTID_LENGTH 1024 + static void add_load_option(DYNAMIC_STRING *str, const char *option, const char *option_value); static ulong find_set(TYPELIB *lib, const char *x, uint length, @@ -135,6 +138,7 @@ static ulong opt_compatible_mode= 0; #define MYSQL_OPT_SLAVE_DATA_COMMENTED_SQL 2 static uint opt_mysql_port= 0, opt_master_data; static uint opt_slave_data; +static uint opt_use_gtid; static uint my_end_arg; static char * opt_mysql_unix_port=0; static int first_error=0; @@ -355,6 +359,13 @@ static struct my_option my_long_options[] = "server receiving the resulting dump.", &opt_galera_sst_mode, &opt_galera_sst_mode, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"gtid", OPT_USE_GTID, "Used together with --master-data=1 or --dump-slave=1." + "When enabled, the output from those options will set the GTID position " + "instead of the binlog file and offset; the file/offset will appear only as " + "a comment. 
When disabled, the GTID position will still appear in the " + "output, but only commented.", + &opt_use_gtid, &opt_use_gtid, 0, GET_BOOL, NO_ARG, + 0, 0, 0, 0, 0, 0}, {"help", '?', "Display this help message and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"hex-blob", OPT_HEXBLOB, "Dump binary strings (BINARY, " @@ -1186,7 +1197,7 @@ check_consistent_binlog_pos(char *binlog_pos_file, char *binlog_pos_offset) if (mysql_query_with_error_report(mysql, &res, "SHOW STATUS LIKE 'binlog_snapshot_%'")) - return 1; + return 0; found= 0; while ((row= mysql_fetch_row(res))) @@ -1209,6 +1220,90 @@ check_consistent_binlog_pos(char *binlog_pos_file, char *binlog_pos_offset) return (found == 2); } + +/* + Get the GTID position corresponding to a given old-style binlog position. + This uses BINLOG_GTID_POS(). The advantage is that the GTID position can + be obtained completely non-blocking in this way (without the need for + FLUSH TABLES WITH READ LOCK), as the old-style position can be obtained + with START TRANSACTION WITH CONSISTENT SNAPSHOT. + + Returns 0 if ok, non-zero if error. 
+*/ +static int +get_binlog_gtid_pos(char *binlog_pos_file, char *binlog_pos_offset, + char *out_gtid_pos) +{ + DYNAMIC_STRING query; + MYSQL_RES *res; + MYSQL_ROW row; + int err; + char file_buf[FN_REFLEN*2+1], offset_buf[LONGLONG_LEN*2+1]; + size_t len_pos_file= strlen(binlog_pos_file); + size_t len_pos_offset= strlen(binlog_pos_offset); + + if (len_pos_file >= FN_REFLEN || len_pos_offset > LONGLONG_LEN) + return 0; + mysql_real_escape_string(mysql, file_buf, binlog_pos_file, len_pos_file); + mysql_real_escape_string(mysql, offset_buf, binlog_pos_offset, len_pos_offset); + init_dynamic_string_checked(&query, "SELECT BINLOG_GTID_POS('", 256, 1024); + dynstr_append_checked(&query, file_buf); + dynstr_append_checked(&query, "', '"); + dynstr_append_checked(&query, offset_buf); + dynstr_append_checked(&query, "')"); + + err= mysql_query_with_error_report(mysql, &res, query.str); + dynstr_free(&query); + if (err) + return err; + + err= 1; + if ((row= mysql_fetch_row(res))) + { + strmake(out_gtid_pos, row[0], MAX_GTID_LENGTH-1); + err= 0; + } + mysql_free_result(res); + + return err; +} + + +/* + Get the GTID position on a master or slave. + The parameter MASTER is non-zero to get the position on a master + (@@gtid_binlog_pos) or zero for a slave (@@gtid_slave_pos). + + This uses the @@gtid_binlog_pos or @@gtid_slave_pos, so requires FLUSH TABLES + WITH READ LOCK or similar to be consistent. + + Returns 0 if ok, non-zero for error. +*/ +static int +get_gtid_pos(char *out_gtid_pos, int master) +{ + MYSQL_RES *res; + MYSQL_ROW row; + int found; + + if (mysql_query_with_error_report(mysql, &res, + (master ? 
+ "SELECT @@GLOBAL.gtid_binlog_pos" : + "SELECT @@GLOBAL.gtid_slave_pos"))) + return 1; + + found= 0; + if ((row= mysql_fetch_row(res))) + { + strmake(out_gtid_pos, row[0], MAX_GTID_LENGTH-1); + found++; + } + mysql_free_result(res); + + return (found != 1); +} + + static char *my_case_str(const char *str, uint str_len, const char *token, @@ -4844,12 +4939,15 @@ static int wsrep_set_sst_cmds(MYSQL *mysql) { return 0; } -static int do_show_master_status(MYSQL *mysql_con, int consistent_binlog_pos) + +static int do_show_master_status(MYSQL *mysql_con, int consistent_binlog_pos, + int have_mariadb_gtid, int use_gtid) { MYSQL_ROW row; MYSQL_RES *UNINIT_VAR(master); char binlog_pos_file[FN_REFLEN]; char binlog_pos_offset[LONGLONG_LEN+1]; + char gtid_pos[MAX_GTID_LENGTH]; char *file, *offset; const char *comment_prefix= (opt_master_data == MYSQL_OPT_MASTER_DATA_COMMENTED_SQL) ? "-- " : ""; @@ -4860,6 +4958,9 @@ static int do_show_master_status(MYSQL *mysql_con, int consistent_binlog_pos) return 1; file= binlog_pos_file; offset= binlog_pos_offset; + if (have_mariadb_gtid && + get_binlog_gtid_pos(binlog_pos_file, binlog_pos_offset, gtid_pos)) + return 1; } else { @@ -4889,6 +4990,9 @@ static int do_show_master_status(MYSQL *mysql_con, int consistent_binlog_pos) return 0; } } + + if (have_mariadb_gtid && get_gtid_pos(gtid_pos, 1)) + return 1; } /* SHOW MASTER STATUS reports file and position */ @@ -4897,7 +5001,19 @@ static int do_show_master_status(MYSQL *mysql_con, int consistent_binlog_pos) "recovery from\n--\n\n"); fprintf(md_result_file, "%sCHANGE MASTER TO MASTER_LOG_FILE='%s', MASTER_LOG_POS=%s;\n", - comment_prefix, file, offset); + (use_gtid ? 
"-- " : comment_prefix), file, offset); + if (have_mariadb_gtid) + { + print_comment(md_result_file, 0, + "\n--\n-- GTID to start replication from\n--\n\n"); + if (use_gtid) + fprintf(md_result_file, + "%sCHANGE MASTER TO MASTER_USE_GTID=slave_pos;\n", + comment_prefix); + fprintf(md_result_file, + "%sSET GLOBAL gtid_slave_pos='%s';\n", + (!use_gtid ? "-- " : comment_prefix), gtid_pos); + } check_io(md_result_file); if (!consistent_binlog_pos) @@ -4967,12 +5083,16 @@ static int add_slave_statements(void) return(0); } -static int do_show_slave_status(MYSQL *mysql_con) +static int do_show_slave_status(MYSQL *mysql_con, int use_gtid, + int have_mariadb_gtid) { MYSQL_RES *UNINIT_VAR(slave); MYSQL_ROW row; const char *comment_prefix= (opt_slave_data == MYSQL_OPT_SLAVE_DATA_COMMENTED_SQL) ? "-- " : ""; + const char *gtid_comment_prefix= (use_gtid ? comment_prefix : "-- "); + const char *nogtid_comment_prefix= (!use_gtid ? comment_prefix : "-- "); + int set_gtid_done= 0; if (mysql_query_with_error_report(mysql_con, &slave, multi_source ? 
@@ -4990,8 +5110,30 @@ static int do_show_slave_status(MYSQL *mysql_con) while ((row= mysql_fetch_row(slave))) { + if (multi_source && !set_gtid_done) + { + char gtid_pos[MAX_GTID_LENGTH]; + if (have_mariadb_gtid && get_gtid_pos(gtid_pos, 0)) + return 1; + if (opt_comments) + fprintf(md_result_file, "\n--\n-- Gtid position to start replication " + "from\n--\n\n"); + fprintf(md_result_file, "%sSET GLOBAL gtid_slave_pos='%s';\n", + gtid_comment_prefix, gtid_pos); + set_gtid_done= 1; + } if (row[9 + multi_source] && row[21 + multi_source]) { + if (use_gtid) + { + if (multi_source) + fprintf(md_result_file, "%sCHANGE MASTER '%.80s' TO " + "MASTER_USE_GTID=slave_pos;\n", gtid_comment_prefix, row[0]); + else + fprintf(md_result_file, "%sCHANGE MASTER TO " + "MASTER_USE_GTID=slave_pos;\n", gtid_comment_prefix); + } + /* SHOW MASTER STATUS reports file and position */ if (opt_comments) fprintf(md_result_file, @@ -5000,9 +5142,9 @@ static int do_show_slave_status(MYSQL *mysql_con) if (multi_source) fprintf(md_result_file, "%sCHANGE MASTER '%.80s' TO ", - comment_prefix, row[0]); + nogtid_comment_prefix, row[0]); else - fprintf(md_result_file, "%sCHANGE MASTER TO ", comment_prefix); + fprintf(md_result_file, "%sCHANGE MASTER TO ", nogtid_comment_prefix); if (opt_include_master_host_port) { @@ -5075,12 +5217,13 @@ static int do_flush_tables_read_lock(MYSQL *mysql_con) FLUSH TABLES is to lower the probability of a stage where both mysqldump and most client connections are stalled. Of course, if a second long update starts between the two FLUSHes, we have that bad stall. + + We use the LOCAL option, as we do not want the FLUSH TABLES replicated to + other servers. */ return - ( mysql_query_with_error_report(mysql_con, 0, - ((opt_master_data != 0) ? 
- "FLUSH /*!40101 LOCAL */ TABLES" : - "FLUSH TABLES")) || + ( mysql_query_with_error_report(mysql_con, 0, + "FLUSH /*!40101 LOCAL */ TABLES") || mysql_query_with_error_report(mysql_con, 0, "FLUSH TABLES WITH READ LOCK") ); } @@ -5701,6 +5844,7 @@ int main(int argc, char **argv) char bin_log_name[FN_REFLEN]; int exit_code; int consistent_binlog_pos= 0; + int have_mariadb_gtid= 0; MY_INIT(argv[0]); sf_leaking_memory=1; /* don't report memory leaks on early exits */ @@ -5741,7 +5885,10 @@ int main(int argc, char **argv) /* Check if the server support multi source */ if (mysql_get_server_version(mysql) >= 100000) + { multi_source= 2; + have_mariadb_gtid= 1; + } if (opt_slave_data && do_stop_slave_sql(mysql)) goto err; @@ -5792,9 +5939,11 @@ int main(int argc, char **argv) if (opt_galera_sst_mode && wsrep_set_sst_cmds(mysql)) goto err; - if (opt_master_data && do_show_master_status(mysql, consistent_binlog_pos)) + if (opt_master_data && do_show_master_status(mysql, consistent_binlog_pos, + have_mariadb_gtid, opt_use_gtid)) goto err; - if (opt_slave_data && do_show_slave_status(mysql)) + if (opt_slave_data && do_show_slave_status(mysql, opt_use_gtid, + have_mariadb_gtid)) goto err; if (opt_single_transaction && do_unlock_tables(mysql)) /* unlock but no commit! */ goto err; @@ -5811,19 +5960,36 @@ int main(int argc, char **argv) dump_all_tablespaces(); dump_all_databases(); } - else if (argc > 1 && !opt_databases) - { - /* Only one database and selected table(s) */ - if (!opt_alltspcs && !opt_notspcs) - dump_tablespaces_for_tables(*argv, (argv + 1), (argc -1)); - dump_selected_tables(*argv, (argv + 1), (argc - 1)); - } else { - /* One or more databases, all tables */ - if (!opt_alltspcs && !opt_notspcs) - dump_tablespaces_for_databases(argv); - dump_databases(argv); + // Check all arguments meet length condition. 
Currently database and table + // names are limited to NAME_LEN bytes and stack-based buffers assumes + // that escaped name will be not longer than NAME_LEN*2 + 2 bytes long. + int argument; + for (argument= 0; argument < argc; argument++) + { + size_t argument_length= strlen(argv[argument]); + if (argument_length > NAME_LEN) + { + die(EX_CONSCHECK, "[ERROR] Argument '%s' is too long, it cannot be " + "name for any table or database.\n", argv[argument]); + } + } + + if (argc > 1 && !opt_databases) + { + /* Only one database and selected table(s) */ + if (!opt_alltspcs && !opt_notspcs) + dump_tablespaces_for_tables(*argv, (argv + 1), (argc - 1)); + dump_selected_tables(*argv, (argv + 1), (argc - 1)); + } + else + { + /* One or more databases, all tables */ + if (!opt_alltspcs && !opt_notspcs) + dump_tablespaces_for_databases(argv); + dump_databases(argv); + } } /* add 'START SLAVE' to end of dump */ diff --git a/client/mysqlslap.c b/client/mysqlslap.c index 3ba5eb80a07..01064f74261 100644 --- a/client/mysqlslap.c +++ b/client/mysqlslap.c @@ -1796,8 +1796,8 @@ run_scheduler(stats *sptr, statement *stmts, uint concur, ulonglong limit) pthread_mutex_lock(&sleeper_mutex); master_wakeup= 0; - pthread_mutex_unlock(&sleeper_mutex); pthread_cond_broadcast(&sleep_threshhold); + pthread_mutex_unlock(&sleeper_mutex); gettimeofday(&start_time, NULL); diff --git a/client/mysqltest.cc b/client/mysqltest.cc index ef339d17a42..41f1d074dd9 100644 --- a/client/mysqltest.cc +++ b/client/mysqltest.cc @@ -8990,6 +8990,10 @@ int main(int argc, char **argv) 128, 0, 0, get_var_key, 0, var_free, MYF(0))) die("Variable hash initialization failed"); + { + char path_separator[]= { FN_LIBCHAR, 0 }; + var_set_string("SYSTEM_PATH_SEPARATOR", path_separator); + } var_set_string("MYSQL_SERVER_VERSION", MYSQL_SERVER_VERSION); var_set_string("MYSQL_SYSTEM_TYPE", SYSTEM_TYPE); var_set_string("MYSQL_MACHINE_TYPE", MACHINE_TYPE); @@ -9910,36 +9914,34 @@ struct st_regex int reg_replace(char** buf_p, 
int* buf_len_p, char *pattern, char *replace, char *string, int icase); +bool parse_re_part(char *start_re, char *end_re, + char **p, char *end, char **buf) +{ + if (*start_re != *end_re) + { + switch ((*start_re= *(*p)++)) { + case '(': *end_re= ')'; break; + case '[': *end_re= ']'; break; + case '{': *end_re= '}'; break; + case '<': *end_re= '>'; break; + default: *end_re= *start_re; + } + } + while (*p < end && **p != *end_re) + { + if ((*p)[0] == '\\' && *p + 1 < end && (*p)[1] == *end_re) + (*p)++; -/* - Finds the next (non-escaped) '/' in the expression. - (If the character '/' is needed, it can be escaped using '\'.) -*/ + *(*buf)++= *(*p)++; + } + *(*buf)++= 0; + + (*p)++; + + return *p > end; +} -#define PARSE_REGEX_ARG \ - while (p < expr_end) \ - { \ - char c= *p; \ - if (c == '/') \ - { \ - if (last_c == '\\') \ - { \ - buf_p[-1]= '/'; \ - } \ - else \ - { \ - *buf_p++ = 0; \ - break; \ - } \ - } \ - else \ - *buf_p++ = c; \ - \ - last_c= c; \ - p++; \ - } \ - \ /* Initializes the regular substitution expression to be used in the result output of test. 
@@ -9951,10 +9953,9 @@ struct st_replace_regex* init_replace_regex(char* expr) { struct st_replace_regex* res; char* buf,*expr_end; - char* p; + char* p, start_re, end_re= 1; char* buf_p; uint expr_len= strlen(expr); - char last_c = 0; struct st_regex reg; /* my_malloc() will die on fail with MY_FAE */ @@ -9972,44 +9973,32 @@ struct st_replace_regex* init_replace_regex(char* expr) { bzero(®,sizeof(reg)); /* find the start of the statement */ - while (p < expr_end) - { - if (*p == '/') - break; + while (my_isspace(charset_info, *p) && p < expr_end) p++; - } - if (p == expr_end || ++p == expr_end) + if (p >= expr_end) { if (res->regex_arr.elements) break; else goto err; } - /* we found the start */ - reg.pattern= buf_p; - /* Find first argument -- pattern string to be removed */ - PARSE_REGEX_ARG - - if (p == expr_end || ++p == expr_end) - goto err; + start_re= 0; + reg.pattern= buf_p; + if (parse_re_part(&start_re, &end_re, &p, expr_end, &buf_p)) + goto err; - /* buf_p now points to the replacement pattern terminated with \0 */ reg.replace= buf_p; - - /* Find second argument -- replace string to replace pattern */ - PARSE_REGEX_ARG - - if (p == expr_end) - goto err; - - /* skip the ending '/' in the statement */ - p++; + if (parse_re_part(&start_re, &end_re, &p, expr_end, &buf_p)) + goto err; /* Check if we should do matching case insensitive */ if (p < expr_end && *p == 'i') + { + p++; reg.icase= 1; + } /* done parsing the statement, now place it in regex_arr */ if (insert_dynamic(&res->regex_arr,(uchar*) ®)) diff --git a/cmake/dtrace.cmake b/cmake/dtrace.cmake index 1fc87cfcbef..5d0bb7ff8c9 100644 --- a/cmake/dtrace.cmake +++ b/cmake/dtrace.cmake @@ -1,4 +1,4 @@ -# Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved. 
# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -80,13 +80,6 @@ IF(ENABLE_DTRACE) ${CMAKE_BINARY_DIR}/include/probes_mysql_dtrace.h ${CMAKE_BINARY_DIR}/include/probes_mysql_nodtrace.h ) - IF(CMAKE_SYSTEM_NAME MATCHES "Linux") - # Systemtap object - EXECUTE_PROCESS( - COMMAND ${DTRACE} -G -s ${CMAKE_SOURCE_DIR}/include/probes_mysql.d.base - -o ${CMAKE_BINARY_DIR}/probes_mysql.o - ) - ENDIF() ADD_CUSTOM_TARGET(gen_dtrace_header DEPENDS ${CMAKE_BINARY_DIR}/include/probes_mysql.d @@ -105,12 +98,7 @@ FUNCTION(DTRACE_INSTRUMENT target) IF(ENABLE_DTRACE) ADD_DEPENDENCIES(${target} gen_dtrace_header) - IF(CMAKE_SYSTEM_NAME MATCHES "Linux") - TARGET_LINK_LIBRARIES(${target} ${CMAKE_BINARY_DIR}/probes_mysql.o) - ENDIF() - - # On Solaris, invoke dtrace -G to generate object file and - # link it together with target. + # Invoke dtrace to generate object file and link it together with target. IF(CMAKE_SYSTEM_NAME MATCHES "SunOS") SET(objdir ${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/${target}.dir) SET(outfile ${objdir}/${target}_dtrace.o) @@ -127,6 +115,21 @@ FUNCTION(DTRACE_INSTRUMENT target) -P ${CMAKE_SOURCE_DIR}/cmake/dtrace_prelink.cmake WORKING_DIRECTORY ${objdir} ) + ELSEIF(CMAKE_SYSTEM_NAME MATCHES "Linux") + # dtrace on Linux runs gcc and uses flags from environment + SET(CFLAGS_SAVED $ENV{CFLAGS}) + SET(ENV{CFLAGS} ${CMAKE_C_FLAGS}) + SET(outfile "${CMAKE_BINARY_DIR}/probes_mysql.o") + # Systemtap object + EXECUTE_PROCESS( + COMMAND ${DTRACE} -G -s ${CMAKE_SOURCE_DIR}/include/probes_mysql.d.base + -o ${outfile} + ) + SET(ENV{CFLAGS} ${CFLAGS_SAVED}) + ENDIF() + + # Do not try to extend the library if we have not built the .o file + IF(outfile) # Add full object path to linker flags GET_TARGET_PROPERTY(target_type ${target} TYPE) IF(NOT target_type MATCHES "STATIC") @@ -138,12 +141,12 @@ FUNCTION(DTRACE_INSTRUMENT target) # but maybe one day this will be fixed. 
GET_TARGET_PROPERTY(target_location ${target} LOCATION) ADD_CUSTOM_COMMAND( - TARGET ${target} POST_BUILD - COMMAND ${CMAKE_AR} r ${target_location} ${outfile} - COMMAND ${CMAKE_RANLIB} ${target_location} - ) - # Used in DTRACE_INSTRUMENT_WITH_STATIC_LIBS - SET(TARGET_OBJECT_DIRECTORY_${target} ${objdir} CACHE INTERNAL "") + TARGET ${target} POST_BUILD + COMMAND ${CMAKE_AR} r ${target_location} ${outfile} + COMMAND ${CMAKE_RANLIB} ${target_location} + ) + # Used in DTRACE_INSTRUMENT_WITH_STATIC_LIBS + SET(TARGET_OBJECT_DIRECTORY_${target} ${objdir} CACHE INTERNAL "") ENDIF() ENDIF() ENDIF() diff --git a/cmake/install_macros.cmake b/cmake/install_macros.cmake index 6cb5b02ecec..e16467619c7 100644 --- a/cmake/install_macros.cmake +++ b/cmake/install_macros.cmake @@ -63,7 +63,9 @@ FUNCTION (INSTALL_DEBUG_SYMBOLS) STRING(REPLACE ".dll" ".pdb" pdb_location ${pdb_location}) STRING(REPLACE ".lib" ".pdb" pdb_location ${pdb_location}) IF(CMAKE_GENERATOR MATCHES "Visual Studio") - STRING(REPLACE "${CMAKE_CFG_INTDIR}" "\${CMAKE_INSTALL_CONFIG_NAME}" pdb_location ${pdb_location}) + STRING(REPLACE + "${CMAKE_CFG_INTDIR}" "\${CMAKE_INSTALL_CONFIG_NAME}" + pdb_location ${pdb_location}) ENDIF() set(comp "") diff --git a/cmake/os/Windows.cmake b/cmake/os/Windows.cmake index 254d9f6d946..d52e36193ca 100644 --- a/cmake/os/Windows.cmake +++ b/cmake/os/Windows.cmake @@ -1,4 +1,4 @@ -# Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved. 
# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -62,22 +62,30 @@ IF(MINGW AND CMAKE_SIZEOF_VOID_P EQUAL 4) ENDIF() IF(MSVC) - # Enable debug info also in Release build, and create PDB to be able to analyze - # crashes - FOREACH(lang C CXX) - SET(CMAKE_${lang}_FLAGS_RELEASE "${CMAKE_${lang}_FLAGS_RELEASE} /Zi") - ENDFOREACH() + # Enable debug info also in Release build, + # and create PDB to be able to analyze crashes. FOREACH(type EXE SHARED MODULE) - SET(CMAKE_{type}_LINKER_FLAGS_RELEASE "${CMAKE_${type}_LINKER_FLAGS_RELEASE} /debug") + SET(CMAKE_${type}_LINKER_FLAGS_RELEASE + "${CMAKE_${type}_LINKER_FLAGS_RELEASE} /debug") ENDFOREACH() # Force static runtime libraries + # - Choose debugging information: + # /Z7 + # Produces an .obj file containing full symbolic debugging + # information for use with the debugger. The symbolic debugging + # information includes the names and types of variables, as well as + # functions and line numbers. No .pdb file is produced by the compiler. + FOREACH(lang C CXX) + SET(CMAKE_${lang}_FLAGS_RELEASE "${CMAKE_${lang}_FLAGS_RELEASE} /Z7") + ENDFOREACH() FOREACH(flag - CMAKE_C_FLAGS_RELEASE CMAKE_C_FLAGS_RELWITHDEBINFO - CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_DEBUG_INIT + CMAKE_C_FLAGS_RELEASE CMAKE_C_FLAGS_RELWITHDEBINFO + CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_DEBUG_INIT CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_RELWITHDEBINFO - CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_DEBUG_INIT) + CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_DEBUG_INIT) STRING(REPLACE "/MD" "/MT" "${flag}" "${${flag}}") + STRING(REPLACE "/Zi" "/Z7" "${flag}" "${${flag}}") ENDFOREACH() @@ -105,7 +113,6 @@ IF(MSVC) SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4800 /wd4805 /wd4996") SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4800 /wd4805 /wd4996 /we4099") - IF(CMAKE_SIZEOF_VOID_P MATCHES 8) # _WIN64 is defined by the compiler itself.
# Yet, we define it here again to work around a bug with Intellisense diff --git a/cmake/plugin.cmake b/cmake/plugin.cmake index 3724cc9004b..70dbb7c7ff1 100644 --- a/cmake/plugin.cmake +++ b/cmake/plugin.cmake @@ -203,7 +203,7 @@ MACRO(MYSQL_ADD_PLUGIN) NOT CPACK_COMPONENTS_ALL MATCHES ${ARG_COMPONENT} AND NOT WITH_WSREP) SET(CPACK_COMPONENTS_ALL ${CPACK_COMPONENTS_ALL} ${ARG_COMPONENT} PARENT_SCOPE) - SET(CPACK_RPM_${ARG_COMPONENT}_PACKAGE_REQUIRES "MariaDB-server" PARENT_SCOPE) + SET(CPACK_RPM_${ARG_COMPONENT}_PACKAGE_REQUIRES "MariaDB" PARENT_SCOPE) IF (NOT ARG_CONFIG) SET(ARG_CONFIG "${CMAKE_CURRENT_BINARY_DIR}/${target}.cnf") diff --git a/config.h.cmake b/config.h.cmake index c48d17fada0..8faf887730a 100644 --- a/config.h.cmake +++ b/config.h.cmake @@ -520,6 +520,11 @@ #endif #define PSAPI_VERSION 1 /* for GetProcessMemoryInfo() */ +/* We don't want the min/max macros */ +#ifdef __WIN__ +#define NOMINMAX +#endif + /* MySQL features */ diff --git a/dbug/CMakeLists.txt b/dbug/CMakeLists.txt index 3d0b0801132..c40c70b684d 100644 --- a/dbug/CMakeLists.txt +++ b/dbug/CMakeLists.txt @@ -24,8 +24,10 @@ TARGET_LINK_LIBRARIES(dbug mysys) ADD_EXECUTABLE(tests tests.c) TARGET_LINK_LIBRARIES(tests dbug) -ADD_EXECUTABLE(factorial my_main.c factorial.c) -TARGET_LINK_LIBRARIES(factorial dbug) +IF(NOT CMAKE_CROSSCOMPILING) + ADD_EXECUTABLE(factorial my_main.c factorial.c) + TARGET_LINK_LIBRARIES(factorial dbug) +ENDIF() IF(NOT WIN32 AND NOT CMAKE_GENERATOR MATCHES Xcode) FIND_PROGRAM(GROFF groff) @@ -36,11 +38,11 @@ IF(NOT WIN32 AND NOT CMAKE_GENERATOR MATCHES Xcode) SET(SOURCE_INC factorial.r main.r example1.r example2.r example3.r) ADD_CUSTOM_COMMAND(OUTPUT ${OUTPUT_INC} DEPENDS factorial - COMMAND ./factorial 1 2 3 4 5 > output1.r - COMMAND ./factorial -\#t:o 2 3 > output2.r - COMMAND ./factorial -\#d:t:o 3 > output3.r - COMMAND ./factorial -\#d,result:o 4 > output4.r - COMMAND ./factorial -\#d:f,factorial:F:L:o 3 > output5.r) + COMMAND factorial 1 2 3 4 5 > output1.r + 
COMMAND factorial -\#t:o 2 3 > output2.r + COMMAND factorial -\#d:t:o 3 > output3.r + COMMAND factorial -\#d,result:o 4 > output4.r + COMMAND factorial -\#d:f,factorial:F:L:o 3 > output5.r) FOREACH(file ${SOURCE_INC}) STRING(REGEX REPLACE "\\.r" ".c" srcfile ${file}) ADD_CUSTOM_COMMAND(OUTPUT ${file} DEPENDS ${srcfile} diff --git a/extra/yassl/CMakeLists.txt b/extra/yassl/CMakeLists.txt index 08e0f49d8a2..23404a661d6 100644 --- a/extra/yassl/CMakeLists.txt +++ b/extra/yassl/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -33,7 +33,6 @@ SET(YASSL_SOURCES src/buffer.cpp src/cert_wrapper.cpp src/crypto_wrapper.cpp sr ADD_CONVENIENCE_LIBRARY(yassl ${YASSL_SOURCES}) RESTRICT_SYMBOL_EXPORTS(yassl) -INSTALL_DEBUG_SYMBOLS(yassl) IF(MSVC) INSTALL_DEBUG_TARGET(yassl DESTINATION ${INSTALL_LIBDIR}/debug) ENDIF() diff --git a/extra/yassl/src/ssl.cpp b/extra/yassl/src/ssl.cpp index 9352423de2a..356b310037e 100644 --- a/extra/yassl/src/ssl.cpp +++ b/extra/yassl/src/ssl.cpp @@ -1,5 +1,5 @@ /* - Copyright (c) 2005, 2012, Oracle and/or its affiliates. + Copyright (c) 2005, 2014, Oracle and/or its affiliates. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -791,7 +791,10 @@ int SSL_CTX_load_verify_locations(SSL_CTX* ctx, const char* file, strncpy(name, path, MAX_PATH - 1 - HALF_PATH); strncat(name, "/", 1); strncat(name, entry->d_name, HALF_PATH); - if (stat(name, &buf) < 0) return SSL_BAD_STAT; + if (stat(name, &buf) < 0) { + closedir(dir); + return SSL_BAD_STAT; + } if (S_ISREG(buf.st_mode)) ret = read_file(ctx, name, SSL_FILETYPE_PEM, CA); diff --git a/extra/yassl/taocrypt/CMakeLists.txt b/extra/yassl/taocrypt/CMakeLists.txt index 84f1fc186e4..eeed35fd6f4 100644 --- a/extra/yassl/taocrypt/CMakeLists.txt +++ b/extra/yassl/taocrypt/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -32,7 +32,6 @@ SET(TAOCRYPT_SOURCES src/aes.cpp src/aestables.cpp src/algebra.cpp src/arc4.cpp ADD_CONVENIENCE_LIBRARY(taocrypt ${TAOCRYPT_SOURCES}) RESTRICT_SYMBOL_EXPORTS(taocrypt) -INSTALL_DEBUG_SYMBOLS(taocrypt) IF(MSVC) INSTALL_DEBUG_TARGET(taocrypt DESTINATION ${INSTALL_LIBDIR}/debug) ENDIF() diff --git a/extra/yassl/taocrypt/include/asn.hpp b/extra/yassl/taocrypt/include/asn.hpp index c58c7579ccf..b826bf54f8d 100644 --- a/extra/yassl/taocrypt/include/asn.hpp +++ b/extra/yassl/taocrypt/include/asn.hpp @@ -1,5 +1,5 @@ /* - Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. + Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -296,11 +296,11 @@ private: byte* signature_; char issuer_[ASN_NAME_MAX]; // Names char subject_[ASN_NAME_MAX]; // Names - char beforeDate_[MAX_DATE_SZ]; // valid before date + char beforeDate_[MAX_DATE_SZ+1]; // valid before date, +null term byte beforeDateType_; // beforeDate time type - char afterDate_[MAX_DATE_SZ]; // valid after date + char afterDate_[MAX_DATE_SZ+1]; // valid after date, +null term byte afterDateType_; // afterDate time type - bool verify_; // Default to yes, but could be off + bool verify_; // Default to yes, but could be off void ReadHeader(); void Decode(SignerList*, CertType); diff --git a/include/keycache.h b/include/keycache.h index 8fa9bf1cd18..85937ebefb9 100644 --- a/include/keycache.h +++ b/include/keycache.h @@ -67,11 +67,13 @@ typedef enum key_cache_type typedef int (*INIT_KEY_CACHE) (void *, uint key_cache_block_size, - size_t use_mem, uint division_limit, uint age_threshold); + size_t use_mem, uint division_limit, uint age_threshold, + uint changed_blocks_hash_size); typedef int (*RESIZE_KEY_CACHE) (void *, uint key_cache_block_size, - size_t use_mem, uint division_limit, uint age_threshold); + size_t use_mem, uint division_limit, uint age_threshold, + uint changed_blocks_hash_size); typedef void (*CHANGE_KEY_CACHE_PARAM) (void *keycache_cb, @@ -146,6 +148,7 @@ typedef struct st_key_cache ulonglong param_division_limit;/* min. 
percentage of warm blocks */ ulonglong param_age_threshold; /* determines when hot block is downgraded */ ulonglong param_partitions; /* number of the key cache partitions */ + ulonglong changed_blocks_hash_size; /* number of hash buckets for changed files */ my_bool key_cache_inited; /* <=> key cache has been created */ my_bool can_be_used; /* usage of cache for read/write is allowed */ my_bool in_init; /* set to 1 in MySQL during init/resize */ @@ -160,10 +163,11 @@ extern KEY_CACHE dflt_key_cache_var, *dflt_key_cache; extern int init_key_cache(KEY_CACHE *keycache, uint key_cache_block_size, size_t use_mem, uint division_limit, - uint age_threshold, uint partitions); + uint age_threshold, uint changed_blocks_hash_size, + uint partitions); extern int resize_key_cache(KEY_CACHE *keycache, uint key_cache_block_size, size_t use_mem, uint division_limit, - uint age_threshold); + uint age_threshold, uint changed_blocks_hash_size); extern void change_key_cache_param(KEY_CACHE *keycache, uint division_limit, uint age_threshold); extern uchar *key_cache_read(KEY_CACHE *keycache, @@ -202,6 +206,7 @@ extern int repartition_key_cache(KEY_CACHE *keycache, size_t use_mem, uint division_limit, uint age_threshold, + uint changed_blocks_hash_size, uint partitions); C_MODE_END #endif /* _keycache_h */ diff --git a/include/mysql.h.pp b/include/mysql.h.pp index ca5b1ac05bf..6b60389acc3 100644 --- a/include/mysql.h.pp +++ b/include/mysql.h.pp @@ -49,9 +49,9 @@ enum enum_field_types { MYSQL_TYPE_DECIMAL, MYSQL_TYPE_TINY, MYSQL_TYPE_DATETIME, MYSQL_TYPE_YEAR, MYSQL_TYPE_NEWDATE, MYSQL_TYPE_VARCHAR, MYSQL_TYPE_BIT, - MYSQL_TYPE_TIMESTAMP2, - MYSQL_TYPE_DATETIME2, - MYSQL_TYPE_TIME2, + MYSQL_TYPE_TIMESTAMP2, + MYSQL_TYPE_DATETIME2, + MYSQL_TYPE_TIME2, MYSQL_TYPE_NEWDECIMAL=246, MYSQL_TYPE_ENUM=247, MYSQL_TYPE_SET=248, @@ -94,7 +94,7 @@ my_bool net_write_command(NET *net,unsigned char command, const unsigned char *header, size_t head_len, const unsigned char *packet, size_t len); int 
net_real_write(NET *net,const unsigned char *packet, size_t len); -unsigned long my_net_read(NET *net); +unsigned long my_net_read_packet(NET *net, my_bool read_from_server); struct sockaddr; int my_connect(my_socket s, const struct sockaddr *name, unsigned int namelen, unsigned int timeout); @@ -553,7 +553,7 @@ int mysql_list_processes_cont(MYSQL_RES **ret, MYSQL *mysql, int mysql_options(MYSQL *mysql,enum mysql_option option, const void *arg); int mysql_options4(MYSQL *mysql,enum mysql_option option, - const void *arg1, const void *arg2); + const void *arg1, const void *arg2); void mysql_free_result(MYSQL_RES *result); int mysql_free_result_start(MYSQL_RES *result); int mysql_free_result_cont(MYSQL_RES *result, int status); diff --git a/include/mysql_com.h b/include/mysql_com.h index 9486bd3ebb7..43be28f87a0 100644 --- a/include/mysql_com.h +++ b/include/mysql_com.h @@ -541,7 +541,8 @@ my_bool net_write_command(NET *net,unsigned char command, const unsigned char *header, size_t head_len, const unsigned char *packet, size_t len); int net_real_write(NET *net,const unsigned char *packet, size_t len); -unsigned long my_net_read(NET *net); +unsigned long my_net_read_packet(NET *net, my_bool read_from_server); +#define my_net_read(A) my_net_read_packet((A), 0) #ifdef MY_GLOBAL_INCLUDED void my_net_set_write_timeout(NET *net, uint timeout); diff --git a/include/thread_pool_priv.h b/include/thread_pool_priv.h index 449c8ded66b..4270c32c826 100644 --- a/include/thread_pool_priv.h +++ b/include/thread_pool_priv.h @@ -1,6 +1,6 @@ #error don't use /* - Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved. + Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/libmysql/CMakeLists.txt b/libmysql/CMakeLists.txt index 6897031b1f9..f6b23d047d1 100644 --- a/libmysql/CMakeLists.txt +++ b/libmysql/CMakeLists.txt @@ -406,7 +406,6 @@ SET(LIBS clientlib dbug strings vio mysys mysys_ssl ${ZLIB_LIBRARY} ${SSL_LIBRAR MERGE_LIBRARIES(mysqlclient STATIC ${LIBS} COMPONENT Development) # Visual Studio users need debug static library for debug projects -INSTALL_DEBUG_SYMBOLS(clientlib) IF(MSVC) INSTALL_DEBUG_TARGET(mysqlclient DESTINATION ${INSTALL_LIBDIR}/debug) INSTALL_DEBUG_TARGET(clientlib DESTINATION ${INSTALL_LIBDIR}/debug) @@ -447,6 +446,10 @@ IF(NOT DISABLE_SHARED) SOVERSION "${SHARED_LIB_MAJOR_VERSION}") IF(LINK_FLAG_NO_UNDEFINED OR VERSION_SCRIPT_LINK_FLAGS) GET_TARGET_PROPERTY(libmysql_link_flags libmysql LINK_FLAGS) + IF(NOT libmysql_link_flags) + # Avoid libmysql_link_flags-NOTFOUND + SET(libmysql_link_flags) + ENDIF() SET_TARGET_PROPERTIES(libmysql PROPERTIES LINK_FLAGS "${libmysql_link_flags} ${LINK_FLAG_NO_UNDEFINED} ${VERSION_SCRIPT_LINK_FLAGS}") ENDIF() diff --git a/man/CMakeLists.txt b/man/CMakeLists.txt index f2842959c3f..c4383b31a17 100644 --- a/man/CMakeLists.txt +++ b/man/CMakeLists.txt @@ -14,6 +14,7 @@ # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA SET(MAN1_SERVER innochecksum.1 my_print_defaults.1 myisam_ftdump.1 myisamchk.1 + aria_chk.1 aria_dump_log.1 aria_ftdump.1 aria_pack.1 aria_read_log.1 myisamlog.1 myisampack.1 mysql.server.1 mysql_convert_table_format.1 mysql_fix_extensions.1 mysql_install_db.1 diff --git a/man/aria_pack.1 b/man/aria_pack.1 index 9cec33a3818..ee47f5ff3bb 100644 --- a/man/aria_pack.1 +++ b/man/aria_pack.1 @@ -1,6 +1,6 @@ .TH ARIA_PACK "1" "May 2014" "aria_pack Ver 1.0" "User Commands" .SH NAME -aria_pack \- manual page for aria_pack Ver 1.0 +aria_pack \- generate compressed, read\-only Aria tables .SH SYNOPSIS .B 
aria_pack [\fIOPTIONS\fR] \fIfilename\fR... diff --git a/man/mysqlbinlog.1 b/man/mysqlbinlog.1 index cc0f62485b5..5e9bc6c2f43 100644 --- a/man/mysqlbinlog.1 +++ b/man/mysqlbinlog.1 @@ -1255,7 +1255,7 @@ indicates a FORMAT_DESCRIPTION_EVENT\&. The following table lists the possible type codes\&. .TS allbox tab(:); -l l l. +l l lx. T{ Type T}:T{ @@ -1389,6 +1389,7 @@ T} T{ 0f T}:T{ +.nf FORMAT_DESCRIPTION_EVENT T}:T{ This indicates the start of a log file written by MySQL 5 or later\&. @@ -1526,7 +1527,7 @@ Master Pos: The position of the next event in the original master log file\&. Flags: 16 flags\&. Currently, the following flags are used\&. The others are reserved for future use\&. .TS allbox tab(:); -l l l. +l l lx. T{ Flag T}:T{ @@ -1537,6 +1538,7 @@ T} T{ 01 T}:T{ +.nf LOG_EVENT_BINLOG_IN_USE_F T}:T{ Log file correctly closed\&. (Used only in @@ -1558,6 +1560,7 @@ T} T{ 04 T}:T{ +.nf LOG_EVENT_THREAD_SPECIFIC_F T}:T{ Set if the event is dependent on the connection it was executed in (for diff --git a/man/mysqldump.1 b/man/mysqldump.1 index 59d2416b25e..fdca51f091a 100644 --- a/man/mysqldump.1 +++ b/man/mysqldump.1 @@ -2027,7 +2027,7 @@ value, an empty string, and the string value are distinguished from one another in the output generated by this option as follows\&. .TS allbox tab(:); -l l. +l lx. T{ \fBValue\fR: T}:T{ diff --git a/mysql-test/include/gis_debug.inc b/mysql-test/include/gis_debug.inc new file mode 100644 index 00000000000..c81932ef90c --- /dev/null +++ b/mysql-test/include/gis_debug.inc @@ -0,0 +1,161 @@ +# +# This is a shared file included from t/gis-precise.test and t/gis-debug.test +# +# - gis-precise.test is executed both in debug and production builds +# and makes sure that the checked GIS functions return the expected results. +# +# - gis-debug.test is executed only in debug builds +# (and is skipped in production builds). +# gis-debug.test activates tracing of the internal GIS routines. 
+# The trace log is printed to the client side warnings. +# So gis-debug.test makes sure not only that the correct results are returned, +# but also check *how* these results were generated - makes sure that +# the internal GIS routines went through the expected data and code flow paths. +# + +--disable_warnings +DROP TABLE IF EXISTS p1; +--enable_warnings + +DELIMITER |; +CREATE PROCEDURE p1(dist DOUBLE, geom TEXT) +BEGIN + DECLARE g GEOMETRY; + SET g=GeomFromText(geom); + SELECT geom AS `-----`; + SELECT dist, GeometryType(@buf:=ST_Buffer(g, dist)) AS `buffer`, ROUND(ST_AREA(@buf),2) AS buf_area; +END| +DELIMITER ;| + +--disable_query_log + +--echo # +--echo # Testing ST_BUFFER with positive distance +--echo # + +CALL p1(1, 'POINT(0 0))'); +CALL p1(1, 'LineString(0 1, 1 1))'); +CALL p1(1, 'LineString(9 9,8 1,1 5,0 0)'); +CALL p1(1, 'Polygon((2 2,2 8,8 8,8 2,2 2))'); +CALL p1(1, 'Polygon((0 0,0 8,8 8,8 0,0 0),(2 2,6 2,6 6,2 6,2 2))'); +CALL p1(1, 'Polygon((0 0, 0 8, 8 8, 8 10, -10 10, -10 0, 0 0))'); +CALL p1(1, 'MultiPoint(9 9,8 1,1 5)'); +CALL p1(1, 'MultiLineString((0 0,2 2))'); +CALL p1(1, 'MultiLineString((0 0,2 2,0 4))'); +CALL p1(1, 'MultiLineString((0 0,2 2),(0 2,2 0))'); +CALL p1(1, 'MultiLineString((2 2,2 8,-2 8),(-6 -6, 6 6),(10 10, 14 14))'); +CALL p1(1, 'MultiPolygon(((2 2,2 8,8 8,8 2,2 2)), ((9 9,8 1,1 5,9 9)))'); +CALL p1(1, 'MultiPolygon(((2 2,2 8,8 8,8 2,2 2), (4 4,4 6,6 6,6 4,4 4)),((9 9,8 1,1 5,9 9)))'); +CALL p1(1, 'GeometryCollection(Point(0 0))'); +CALL p1(1, 'GeometryCollection(LineString(0 0, 2 2)))'); +CALL p1(1, 'GeometryCollection(Polygon((2 2,2 8,8 8,8 2,2 2))))'); +CALL p1(1, 'GeometryCollection(MultiPoint(9 9,8 1,1 5))'); +CALL p1(1, 'GeometryCollection(MultiLineString((0 0,0 1),(3 0,3 1)))'); +CALL p1(1, 'GeometryCollection(MultiPolygon(((0 0, 3 0, 3 3, 0 3, 0 0)),((6 6,6 9,9 9,9 6,6 6))))'); +CALL p1(1, 'GeometryCollection(Point(9 9),LineString(1 5,0 0),Polygon((2 2,2 8,8 8,8 2,2 2)))'); + + +--echo # +--echo # Testing ST_BUFFER 
with zero distance +--echo # + +CALL p1(0, 'POINT(0 0))'); +CALL p1(0, 'LineString(0 1, 1 1))'); +CALL p1(0, 'LineString(9 9,8 1,1 5,0 0)'); +CALL p1(0, 'Polygon((2 2,2 8,8 8,8 2,2 2))'); +CALL p1(0, 'Polygon((0 0,0 8,8 8,8 0,0 0),(2 2,6 2,6 6,2 6,2 2))'); +CALL p1(0, 'Polygon((0 0, 0 8, 8 8, 8 10, -10 10, -10 0, 0 0))'); +CALL p1(0, 'MultiPoint(9 9,8 1,1 5)'); +CALL p1(0, 'MultiLineString((0 0,2 2))'); +CALL p1(0, 'MultiLineString((0 0,2 2,0 4))'); +CALL p1(0, 'MultiLineString((0 0,2 2),(0 2,2 0))'); +CALL p1(0, 'MultiLineString((2 2,2 8,-2 8),(-6 -6, 6 6),(10 10, 14 14))'); +CALL p1(0, 'MultiPolygon(((2 2,2 8,8 8,8 2,2 2)), ((9 9,8 1,1 5,9 9)))'); +CALL p1(0, 'MultiPolygon(((2 2,2 8,8 8,8 2,2 2), (4 4,4 6,6 6,6 4,4 4)),((9 9,8 1,1 5,9 9)))'); +CALL p1(0, 'GeometryCollection(Point(0 0))'); +CALL p1(0, 'GeometryCollection(LineString(0 0, 2 2)))'); +CALL p1(0, 'GeometryCollection(Polygon((2 2,2 8,8 8,8 2,2 2))))'); +CALL p1(0, 'GeometryCollection(MultiPoint(9 9,8 1,1 5))'); +CALL p1(0, 'GeometryCollection(MultiLineString((0 0,0 1),(3 0,3 1)))'); +CALL p1(0, 'GeometryCollection(MultiPolygon(((0 0, 3 0, 3 3, 0 3, 0 0)),((6 6,6 9,9 9,9 6,6 6))))'); +CALL p1(0, 'GeometryCollection(Point(9 9),LineString(1 5,0 0),Polygon((2 2,2 8,8 8,8 2,2 2)))'); + + +--echo # +--echo # Testing ST_BUFFER with negative distance +--echo # + +CALL p1(-1, 'POINT(0 0))'); +CALL p1(-1, 'LineString(0 1, 1 1))'); +CALL p1(-1, 'LineString(9 9,8 1,1 5,0 0)'); +CALL p1(-1, 'Polygon((2 2,2 8,8 8,8 2,2 2))'); +# +# Wrong shape +# CALL p1(-1, 'Polygon((0 0,0 8,8 8,8 0,0 0),(2 2,6 2,6 6,2 6,2 2))'); +# Wrong shape +# CALL p1(-1, 'Polygon((0 0, 0 8, 8 8, 8 10, -10 10, -10 0, 0 0))'); +# +CALL p1(-1, 'MultiPoint(9 9,8 1,1 5)'); +CALL p1(-1, 'MultiLineString((0 0,2 2))'); +CALL p1(-1, 'MultiLineString((0 0,2 2,0 4))'); +CALL p1(-1, 'MultiLineString((0 0,2 2),(0 2,2 0))'); +CALL p1(-1, 'MultiLineString((2 2,2 8,-2 8),(-6 -6, 6 6),(10 10, 14 14))'); +# +# Wrong shape +#CALL p1(-1, 'MultiPolygon(((2 2,2 8,8 
8,8 2,2 2)), ((9 9,8 1,1 5,9 9)))'); +#CALL p1(-1, 'MultiPolygon(((2 2,2 8,8 8,8 2,2 2), (4 4,4 6,6 6,6 4,4 4)),((9 9,8 1,1 5,9 9)))'); +# +CALL p1(-1, 'GeometryCollection(Point(0 0))'); +CALL p1(-1, 'GeometryCollection(LineString(0 0, 2 2)))'); +CALL p1(-1, 'GeometryCollection(Polygon((2 2,2 8,8 8,8 2,2 2))))'); +CALL p1(-1, 'GeometryCollection(MultiPoint(9 9,8 1,1 5))'); +CALL p1(-1, 'GeometryCollection(MultiLineString((0 0,0 1),(3 0,3 1)))'); +# +# Wrong shape +# CALL p1(-1, 'GeometryCollection(MultiPolygon(((0 0, 3 0, 3 3, 0 3, 0 0)),((6 6,6 9,9 9,9 6,6 6))))'); +# +CALL p1(-1, 'GeometryCollection(Point(9 9),LineString(1 5,0 0),Polygon((2 2,2 8,8 8,8 2,2 2)))'); + + +--enable_query_log + +SELECT ST_CONTAINS( + GeomFromText('MULTIPOLYGON(((0 0, 0 5, 5 5, 5 0, 0 0)),((6 6, 6 11, 11 11, 11 6, 6 6)))'), + GeomFromText('POINT(5 10)')); +SELECT AsText(ST_UNION( + GeomFromText('MULTIPOLYGON(((0 0, 0 5, 5 5, 5 0, 0 0)),((6 6, 6 11, 11 11, 11 6, 6 6)))'), + GeomFromText('POINT(5 10)'))); + +DROP PROCEDURE p1; + +--echo # +--echo # Bug #13833019 ASSERTION `T1->RESULT_RANGE' FAILED IN GCALC_OPERATION_REDUCER::END_COUPLE +--echo # +SELECT GeometryType(ST_BUFFER(MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((0 0,9 4,3 3,0 0)),((2 2,2 2,8 8,2 3,2 2)))'), 3)); + +--echo # +--echo # Bug #13832749 HANDLE_FATAL_SIGNAL IN GCALC_FUNCTION::COUNT_INTERNAL +--echo # +SELECT GeometryType(ST_BUFFER(MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((3 5,2 5,2 4,3 4,3 5)),((2 2,2 8,8 8,8 2,2 2), (4 4,4 6,6 6,6 4,4 4)), ((9 9,8 1,1 5,9 9)))'),1)); + + +--echo # +--echo # Bug#13358363 - ASSERTION: N > 0 && N < SINUSES_CALCULATED*2+1 | GET_N_SINCOS/ADD_EDGE_BUFFER +--echo # + +DO ST_BUFFER(ST_GEOMCOLLFROMTEXT('linestring(1 1,2 2)'),''); + +SELECT ST_WITHIN( + LINESTRINGFROMTEXT(' LINESTRING(3 8,9 2,3 8,3 3,7 6,4 7,4 7,8 1) '), + ST_BUFFER(MULTIPOLYGONFROMTEXT(' MULTIPOLYGON(((3 5,2 5,2 4,3 4,3 5)),((2 2,2 8,8 8,8 2,2 2),(4 4,4 6,6 6,6 4,4 4)),((0 5,3 5,3 2,1 2,1 1,3 1,3 0,0 0,0 3,2 3,2 4,0 4,0 5))) '), + 
ST_NUMINTERIORRINGS(POLYGONFROMTEXT('POLYGON((3 5,2 4,2 5,3 5)) ')))); + +SELECT ST_DIMENSION(ST_BUFFER(POLYGONFROMTEXT(' POLYGON((3 5,2 5,2 4,3 4,3 5)) '), + ST_NUMINTERIORRINGS(POLYGONFROMTEXT(' POLYGON((0 0,9 3,4 2,0 0))')))); + +SELECT ST_NUMINTERIORRINGS( + ST_ENVELOPE(ST_BUFFER(MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((3 5,2 5,2 4,3 4,3 5))) '), + SRID(MULTILINESTRINGFROMTEXT('MULTILINESTRING((2 2,4 2,1 2,2 4,2 2)) '))))); + +SELECT ASTEXT(ST_BUFFER(POLYGONFROMTEXT(' POLYGON((9 9,5 2,4 5,9 9))'), + SRID(GEOMETRYFROMTEXT(' MULTIPOINT(8 4,5 0,7 8,6 9,3 4,7 3,5 5) ')))); diff --git a/mysql-test/include/gis_generic.inc b/mysql-test/include/gis_generic.inc index e4fee4448c1..59acf04dfa2 100644 --- a/mysql-test/include/gis_generic.inc +++ b/mysql-test/include/gis_generic.inc @@ -72,6 +72,19 @@ INSERT into gis_geometry SELECT * FROM gis_multi_line; INSERT into gis_geometry SELECT * FROM gis_multi_polygon; INSERT into gis_geometry SELECT * FROM gis_geometrycollection; +-- disable_query_log +-- disable_result_log +ANALYZE TABLE gis_point; +ANALYZE TABLE gis_line; +ANALYZE TABLE gis_polygon; +ANALYZE TABLE gis_multi_point; +ANALYZE TABLE gis_multi_line; +ANALYZE TABLE gis_multi_polygon; +ANALYZE TABLE gis_geometrycollection; +ANALYZE TABLE gis_geometry; +-- enable_result_log +-- enable_query_log + SELECT fid, AsText(g) FROM gis_point ORDER by fid; SELECT fid, AsText(g) FROM gis_line ORDER by fid; SELECT fid, AsText(g) FROM gis_polygon ORDER by fid; diff --git a/mysql-test/include/have_mysql_upgrade.inc b/mysql-test/include/have_mysql_upgrade.inc deleted file mode 100644 index 8f486176018..00000000000 --- a/mysql-test/include/have_mysql_upgrade.inc +++ /dev/null @@ -1,4 +0,0 @@ ---require r/have_mysql_upgrade.result ---disable_query_log -select LENGTH("$MYSQL_UPGRADE")>0 as have_mysql_upgrade; ---enable_query_log diff --git a/mysql-test/include/have_semisync_plugin.inc b/mysql-test/include/have_semisync_plugin.inc new file mode 100644 index 00000000000..8a1679de636 --- 
/dev/null +++ b/mysql-test/include/have_semisync_plugin.inc @@ -0,0 +1,15 @@ +# +# Check if server has support for loading plugins +# +if (`SELECT @@have_dynamic_loading != 'YES'`) { + --skip Requires dynamic loading +} + +# +# Check if the variable SEMISYNC_MASTER_SO is set +# +if (!$SEMISYNC_MASTER_SO) +{ + skip Need semisync plugins; +} + diff --git a/mysql-test/include/install_semisync.inc b/mysql-test/include/install_semisync.inc index 368b7b7cb4a..9cc6df2072a 100644 --- a/mysql-test/include/install_semisync.inc +++ b/mysql-test/include/install_semisync.inc @@ -14,7 +14,7 @@ if ($value == No such row) { SET sql_log_bin = 0; - eval INSTALL PLUGIN rpl_semi_sync_master SONAME '$SEMISYNC_MASTER_PLUGIN'; + install plugin rpl_semi_sync_master soname 'semisync_master'; SET GLOBAL rpl_semi_sync_master_enabled = 1; SET sql_log_bin = 1; } @@ -28,7 +28,7 @@ if ($value == No such row) if ($value == No such row) { SET sql_log_bin = 0; - eval INSTALL PLUGIN rpl_semi_sync_slave SONAME '$SEMISYNC_SLAVE_PLUGIN'; + install plugin rpl_semi_sync_slave soname 'semisync_slave'; SET GLOBAL rpl_semi_sync_slave_enabled = 1; SET sql_log_bin = 1; } diff --git a/mysql-test/include/mtr_warnings.sql b/mysql-test/include/mtr_warnings.sql index 06a7b49e979..97309be0857 100644 --- a/mysql-test/include/mtr_warnings.sql +++ b/mysql-test/include/mtr_warnings.sql @@ -159,6 +159,7 @@ INSERT INTO global_suppressions VALUES ("InnoDB: Error: in ALTER TABLE `test`.`t[123]`"), ("InnoDB: Error: in RENAME TABLE table `test`.`t1`"), ("InnoDB: Error: table `test`.`t[123]` .*does not exist in the InnoDB internal"), + ("InnoDB: Warning: semaphore wait:"), /* BUG#32080 - Excessive warnings on Solaris: setrlimit could not diff --git a/mysql-test/include/mysql_upgrade_preparation.inc b/mysql-test/include/mysql_upgrade_preparation.inc index a3c81c4c1e7..03019ae29ff 100644 --- a/mysql-test/include/mysql_upgrade_preparation.inc +++ b/mysql-test/include/mysql_upgrade_preparation.inc @@ -2,13 +2,6 @@ # Can't run 
test of external client with embedded server -- source include/not_embedded.inc - -# Only run test if "mysql_upgrade" is found ---require r/have_mysql_upgrade.result ---disable_query_log -select LENGTH("$MYSQL_UPGRADE")>0 as have_mysql_upgrade; ---enable_query_log - # # Hack: # diff --git a/mysql-test/include/search_pattern_in_file.inc b/mysql-test/include/search_pattern_in_file.inc index c047b5bc499..0d09cdcd36e 100644 --- a/mysql-test/include/search_pattern_in_file.inc +++ b/mysql-test/include/search_pattern_in_file.inc @@ -10,6 +10,10 @@ # The environment variables SEARCH_FILE and SEARCH_PATTERN must be set # before sourcing this routine. # +# Optionally, SEARCH_RANGE can be set to the max number of bytes of the file +# to search. If negative, it will search that many bytes at the end of the +# file. The default is to search only the first 50000 bytes of the file. +# # In case of # - SEARCH_FILE and/or SEARCH_PATTERN is not set # - SEARCH_FILE cannot be opened @@ -38,6 +42,7 @@ # --error 0,1 # --remove_file $error_log # let SEARCH_FILE= $error_log; +# let SEARCH_RANGE= -50000; # # Stop the server # let $restart_file= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect; # --exec echo "wait" > $restart_file @@ -57,8 +62,18 @@ perl; use strict; my $search_file= $ENV{'SEARCH_FILE'} or die "SEARCH_FILE not set"; my $search_pattern= $ENV{'SEARCH_PATTERN'} or die "SEARCH_PATTERN not set"; + my $search_range= $ENV{'SEARCH_RANGE'}; + my $file_content; + $search_range= 50000 unless $search_range =~ /-?[0-9]+/; open(FILE, "$search_file") or die("Unable to open '$search_file': $!\n"); - read(FILE, my $file_content, 50000, 0); + if ($search_range >= 0) { + read(FILE, $file_content, $search_range, 0); + } else { + my $size= -s $search_file; + $search_range = -$size if $size > -$search_range; + seek(FILE, $search_range, 2); + read(FILE, $file_content, -$search_range, 0); + } close(FILE); if ( not $file_content =~ m{$search_pattern} ) { die("# ERROR: The file '$search_file' does not contain 
the expected pattern $search_pattern\n->$file_content<-\n"); diff --git a/mysql-test/include/show_events.inc b/mysql-test/include/show_events.inc index f7b0931c812..9a39ec67d0e 100644 --- a/mysql-test/include/show_events.inc +++ b/mysql-test/include/show_events.inc @@ -83,7 +83,7 @@ let $script= s{block_len=[0-9]+}{block_len=#}; s{Server ver:.*DOLLAR}{SERVER_VERSION, BINLOG_VERSION}; s{GTID [0-9]+-[0-9]+-[0-9]+}{GTID #-#-#}; - s{\[[0-9]-[0-9]-[0-9]+\]}{[#-#-#]}; + s{\[([0-9]-[0-9]-[0-9]+,?)+\]}{[#-#-#]}; s{cid=[0-9]+}{cid=#}; s{SQL_LOAD-[a-z,0-9,-]*.[a-z]*}{SQL_LOAD-<SERVER UUID>-<MASTER server-id>-<file-id>.<extension>}; s{rand_seed1=[0-9]*,rand_seed2=[0-9]*}{rand_seed1=<seed 1>,rand_seed2=<seed 2>}; diff --git a/mysql-test/include/stop_dump_threads.inc b/mysql-test/include/stop_dump_threads.inc new file mode 100644 index 00000000000..ae33c963d9a --- /dev/null +++ b/mysql-test/include/stop_dump_threads.inc @@ -0,0 +1,32 @@ +# ==== Purpose ==== +# +# Stop all dump threads on the server of the current connection. 
+# +# ==== Usage ==== +# +# --source include/stop_dump_threads.inc + +--let $include_filename= stop_dump_threads.inc +--source include/begin_include_file.inc + + +--let $_sdt_show_rpl_debug_info_old= $show_rpl_debug_info +--let $show_rpl_debug_info= 1 +--disable_query_log +--disable_result_log + +--let $_sdt_dump_thread_id= `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE COMMAND = 'Binlog dump'` + +while ($_sdt_dump_thread_id != '') +{ + eval KILL $_sdt_dump_thread_id; + --let $wait_condition= SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE ID = $_sdt_dump_thread_id + --source include/wait_condition.inc + + --let $_sdt_dump_thread_id= `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE COMMAND = 'Binlog dump'` +} + +--let $show_rpl_debug_info= $_sdt_show_rpl_debug_info_old + +--let $include_filename= stop_dump_threads.inc +--source include/end_include_file.inc diff --git a/mysql-test/include/uninstall_semisync.inc b/mysql-test/include/uninstall_semisync.inc index 11668d1db97..0a4c55fa4f2 100644 --- a/mysql-test/include/uninstall_semisync.inc +++ b/mysql-test/include/uninstall_semisync.inc @@ -13,6 +13,11 @@ UNINSTALL PLUGIN rpl_semi_sync_slave; --connection master +# After BUG#17638477 fix, uninstallation of rpl_semi_sync_master +# is not allowed when there are semi sync slaves. Hence kill +# all dump threads before uninstalling it. +SET GLOBAL rpl_semi_sync_master_enabled = OFF; +--source include/stop_dump_threads.inc UNINSTALL PLUGIN rpl_semi_sync_master; --enable_warnings diff --git a/mysql-test/mtr.out-of-source b/mysql-test/mtr.out-of-source index c2809ede136..51713517ae1 100644 --- a/mysql-test/mtr.out-of-source +++ b/mysql-test/mtr.out-of-source @@ -1,5 +1,5 @@ #!/usr/bin/perl # Call mtr in out-of-source build -$ENV{MTR_BINDIR} = "@CMAKE_BINARY_DIR@"; -chdir("@CMAKE_SOURCE_DIR@/mysql-test"); -exit(system($^X, "@CMAKE_SOURCE_DIR@/mysql-test/mysql-test-run.pl", @ARGV) >> 8);
\ No newline at end of file +$ENV{MTR_BINDIR} = '@CMAKE_BINARY_DIR@'; +chdir('@CMAKE_SOURCE_DIR@/mysql-test'); +exit(system($^X, '@CMAKE_SOURCE_DIR@/mysql-test/mysql-test-run.pl', @ARGV) >> 8); diff --git a/mysql-test/r/ctype_ucs.result b/mysql-test/r/ctype_ucs.result index 492c9877917..c4236af1697 100644 --- a/mysql-test/r/ctype_ucs.result +++ b/mysql-test/r/ctype_ucs.result @@ -4508,6 +4508,39 @@ COALESCE(c1) DROP TABLE t1; # +# MDEV-5745 analyze MySQL fix for bug#12368495 +# +SELECT CHAR_LENGTH(TRIM(LEADING 0x000000 FROM _ucs2 0x0061)); +CHAR_LENGTH(TRIM(LEADING 0x000000 FROM _ucs2 0x0061)) +2 +SELECT CHAR_LENGTH(TRIM(LEADING 0x0001 FROM _ucs2 0x0061)); +CHAR_LENGTH(TRIM(LEADING 0x0001 FROM _ucs2 0x0061)) +2 +SELECT CHAR_LENGTH(TRIM(LEADING 0x00 FROM _ucs2 0x0061)); +CHAR_LENGTH(TRIM(LEADING 0x00 FROM _ucs2 0x0061)) +1 +SELECT CHAR_LENGTH(TRIM(TRAILING 0x000000 FROM _ucs2 0x0061)); +CHAR_LENGTH(TRIM(TRAILING 0x000000 FROM _ucs2 0x0061)) +2 +SELECT CHAR_LENGTH(TRIM(TRAILING 0x0001 FROM _ucs2 0x0061)); +CHAR_LENGTH(TRIM(TRAILING 0x0001 FROM _ucs2 0x0061)) +2 +SELECT CHAR_LENGTH(TRIM(TRAILING 0x61 FROM _ucs2 0x0061)); +CHAR_LENGTH(TRIM(TRAILING 0x61 FROM _ucs2 0x0061)) +1 +SELECT CHAR_LENGTH(TRIM(BOTH 0x000000 FROM _ucs2 0x0061)); +CHAR_LENGTH(TRIM(BOTH 0x000000 FROM _ucs2 0x0061)) +2 +SELECT CHAR_LENGTH(TRIM(BOTH 0x0001 FROM _ucs2 0x0061)); +CHAR_LENGTH(TRIM(BOTH 0x0001 FROM _ucs2 0x0061)) +2 +SELECT CHAR_LENGTH(TRIM(BOTH 0x61 FROM _ucs2 0x0061)); +CHAR_LENGTH(TRIM(BOTH 0x61 FROM _ucs2 0x0061)) +1 +SELECT CHAR_LENGTH(TRIM(BOTH 0x00 FROM _ucs2 0x0061)); +CHAR_LENGTH(TRIM(BOTH 0x00 FROM _ucs2 0x0061)) +1 +# # End of 5.5 tests # # diff --git a/mysql-test/r/ctype_upgrade.result b/mysql-test/r/ctype_upgrade.result index 0fc73203494..3d23280c2e8 100644 --- a/mysql-test/r/ctype_upgrade.result +++ b/mysql-test/r/ctype_upgrade.result @@ -227,13 +227,8 @@ DROP TABLE mysql050614_xxx_croatian_ci; # Checking mysql_upgrade # # Running mysql_upgrade -Phase 1/3: Fixing table and 
database names -Phase 2/3: Checking and upgrading tables +Phase 1/3: Running 'mysql_fix_privilege_tables'... Processing databases -information_schema -mtr -mtr.global_suppressions OK -mtr.test_suppressions OK mysql mysql.column_stats OK mysql.columns_priv OK @@ -263,6 +258,13 @@ mysql.time_zone_name OK mysql.time_zone_transition OK mysql.time_zone_transition_type OK mysql.user OK +Phase 2/3: Fixing table and database names +Phase 3/3: Checking and upgrading tables +Processing databases +information_schema +mtr +mtr.global_suppressions OK +mtr.test_suppressions OK performance_schema test test.maria050313_ucs2_croatian_ci_def Needs upgrade @@ -276,17 +278,11 @@ test.maria050313_ucs2_croatian_ci_def OK test.maria050313_utf8_croatian_ci OK test.maria050533_xxx_croatian_ci OK test.maria100004_xxx_croatian_ci OK -Phase 3/3: Running 'mysql_fix_privilege_tables'... OK # Running mysql_upgrade for the second time # This should report OK for all tables -Phase 1/3: Fixing table and database names -Phase 2/3: Checking and upgrading tables +Phase 1/3: Running 'mysql_fix_privilege_tables'... Processing databases -information_schema -mtr -mtr.global_suppressions OK -mtr.test_suppressions OK mysql mysql.column_stats OK mysql.columns_priv OK @@ -316,6 +312,13 @@ mysql.time_zone_name OK mysql.time_zone_transition OK mysql.time_zone_transition_type OK mysql.user OK +Phase 2/3: Fixing table and database names +Phase 3/3: Checking and upgrading tables +Processing databases +information_schema +mtr +mtr.global_suppressions OK +mtr.test_suppressions OK performance_schema test test.maria050313_ucs2_croatian_ci_def OK @@ -323,7 +326,6 @@ test.maria050313_utf8_croatian_ci OK test.maria050533_xxx_croatian_ci OK test.maria100004_xxx_croatian_ci OK test.mysql050614_xxx_croatian_ci OK -Phase 3/3: Running 'mysql_fix_privilege_tables'... 
OK SHOW CREATE TABLE maria050313_ucs2_croatian_ci_def; Table Create Table diff --git a/mysql-test/r/ctype_utf32.result b/mysql-test/r/ctype_utf32.result index 214ec9f9b1d..01edaac88d7 100644 --- a/mysql-test/r/ctype_utf32.result +++ b/mysql-test/r/ctype_utf32.result @@ -1626,6 +1626,39 @@ SELECT '2010-10-10 10:10:10' + INTERVAL GeometryType(GeomFromText('POINT(1 1)')) '2010-10-10 10:10:10' + INTERVAL GeometryType(GeomFromText('POINT(1 1)')) hour_second 2010-10-10 10:10:10 # +# MDEV-5745 analyze MySQL fix for bug#12368495 +# +SELECT CHAR_LENGTH(TRIM(LEADING 0x0000000000 FROM _utf32 0x00000061)); +CHAR_LENGTH(TRIM(LEADING 0x0000000000 FROM _utf32 0x00000061)) +4 +SELECT CHAR_LENGTH(TRIM(LEADING 0x0001 FROM _utf32 0x00000061)); +CHAR_LENGTH(TRIM(LEADING 0x0001 FROM _utf32 0x00000061)) +4 +SELECT CHAR_LENGTH(TRIM(LEADING 0x00 FROM _utf32 0x00000061)); +CHAR_LENGTH(TRIM(LEADING 0x00 FROM _utf32 0x00000061)) +1 +SELECT CHAR_LENGTH(TRIM(TRAILING 0x0000000000 FROM _utf32 0x00000061)); +CHAR_LENGTH(TRIM(TRAILING 0x0000000000 FROM _utf32 0x00000061)) +4 +SELECT CHAR_LENGTH(TRIM(TRAILING 0x0001 FROM _utf32 0x00000061)); +CHAR_LENGTH(TRIM(TRAILING 0x0001 FROM _utf32 0x00000061)) +4 +SELECT CHAR_LENGTH(TRIM(TRAILING 0x61 FROM _utf32 0x00000061)); +CHAR_LENGTH(TRIM(TRAILING 0x61 FROM _utf32 0x00000061)) +3 +SELECT CHAR_LENGTH(TRIM(BOTH 0x0000000000 FROM _utf32 0x00000061)); +CHAR_LENGTH(TRIM(BOTH 0x0000000000 FROM _utf32 0x00000061)) +4 +SELECT CHAR_LENGTH(TRIM(BOTH 0x0001 FROM _utf32 0x00000061)); +CHAR_LENGTH(TRIM(BOTH 0x0001 FROM _utf32 0x00000061)) +4 +SELECT CHAR_LENGTH(TRIM(BOTH 0x61 FROM _utf32 0x00000061)); +CHAR_LENGTH(TRIM(BOTH 0x61 FROM _utf32 0x00000061)) +3 +SELECT CHAR_LENGTH(TRIM(BOTH 0x00 FROM _utf32 0x00000061)); +CHAR_LENGTH(TRIM(BOTH 0x00 FROM _utf32 0x00000061)) +1 +# # End of 5.5 tests # # diff --git a/mysql-test/r/features.result b/mysql-test/r/features.result index 7b6a352ab0c..66d2c6bf71d 100644 --- a/mysql-test/r/features.result +++ 
b/mysql-test/r/features.result @@ -1,6 +1,8 @@ drop table if exists t1; +flush status; show status like "feature%"; Variable_name Value +Feature_delay_key_write 0 Feature_dynamic_columns 0 Feature_fulltext 0 Feature_gis 0 @@ -138,3 +140,17 @@ upd1 show status like "feature_xml"; Variable_name Value Feature_xml 2 +# +# Feature delayed_keys +# +create table t1 (a int, key(a)) engine=myisam delay_key_write=1; +insert into t1 values(1); +insert into t1 values(2); +drop table t1; +create table t1 (a int, key(a)) engine=aria delay_key_write=1; +insert into t1 values(1); +insert into t1 values(2); +drop table t1; +show status like "feature_delay_key_write"; +Variable_name Value +Feature_delay_key_write 2 diff --git a/mysql-test/r/func_str.result b/mysql-test/r/func_str.result index e5edabb0130..44a67fe6b27 100644 --- a/mysql-test/r/func_str.result +++ b/mysql-test/r/func_str.result @@ -2961,6 +2961,9 @@ replace(var, '00000000', table_name) (( t2 ++ t2 )) drop procedure foo; drop table t1,t2; +select md5(_filename "a"), sha(_filename "a"); +md5(_filename "a") sha(_filename "a") +0cc175b9c0f1b6a831c399e269772661 86f7e437faa5a7fce15d1ddcb9eaeaea377667b8 # # End of 5.5 tests # diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result index db68f08cbba..bf07595bc3a 100644 --- a/mysql-test/r/func_time.result +++ b/mysql-test/r/func_time.result @@ -2023,10 +2023,72 @@ SEC_TO_TIME(1.12)+0.1 decimal(14,2) YES NULL SEC_TO_TIME(1.123456)+0.1 decimal(18,6) YES NULL SEC_TO_TIME(1.1234567)+0.1 decimal(18,6) YES NULL DROP TABLE t1; +CREATE TABLE t1 (a DATE) ENGINE=MyISAM; +INSERT INTO t1 VALUES ('2005-05-04'),('2000-02-23'); +SELECT * FROM t1 GROUP BY FROM_UNIXTIME(concat(a,'10'))*1; +a +2000-02-23 +2005-05-04 +SELECT * FROM t1 GROUP BY (-FROM_UNIXTIME(concat(a,'10')))*1; +a +2005-05-04 +2000-02-23 +SELECT * FROM t1 GROUP BY (-FROM_UNIXTIME(concat(a,'10'))); +a +2005-05-04 +2000-02-23 +SELECT * FROM t1 GROUP BY ABS(FROM_UNIXTIME(concat(a,'10'))); +a +2000-02-23 
+2005-05-04 +SELECT * FROM t1 GROUP BY @a:=(FROM_UNIXTIME(concat(a,'10'))*1); +a +2000-02-23 +2005-05-04 +DROP TABLE t1; +SET TIME_ZONE='+02:00'; +# +# MDEV-6302 Wrong result set when using GROUP BY FROM_UNIXTIME(...)+0 +# +CREATE TABLE t1 (a DATE); +INSERT INTO t1 VALUES ('2005-05-04'),('2000-02-23'); +SELECT a, FROM_UNIXTIME(CONCAT(a,'10')) AS f1, FROM_UNIXTIME(CONCAT(a,'10'))+0 AS f2 FROM t1; +a f1 f2 +2005-05-04 1970-01-01 02:33:25 19700101023325.000000 +2000-02-23 1970-01-01 02:33:20 19700101023320.000000 +SELECT * FROM t1 GROUP BY FROM_UNIXTIME(CONCAT(a,'10'))+0; +a +2000-02-23 +2005-05-04 +DROP TABLE t1; +CREATE TABLE t1 (a DATE) ENGINE=MyISAM; +INSERT INTO t1 VALUES ('2005-05-04'),('2000-02-23'); +SELECT * FROM t1 GROUP BY FROM_UNIXTIME(concat(a,'10'))/1; +a +2000-02-23 +2005-05-04 +DROP TABLE t1; +CREATE TABLE t1 (a DATE); +INSERT INTO t1 VALUES ('2005-05-04'); +SELECT CONCAT(FROM_UNIXTIME(CONCAT(a,'10')) MOD FROM_UNIXTIME(CONCAT(a,'10'))) AS f2 FROM t1; +f2 +0.000000 +SELECT CHAR_LENGTH(CONCAT(FROM_UNIXTIME(CONCAT(a,'10')) MOD FROM_UNIXTIME(CONCAT(a,'10')))) AS f2 FROM t1; +f2 +8 +CREATE TABLE t2 AS SELECT CONCAT(FROM_UNIXTIME(CONCAT(a,'10')) MOD FROM_UNIXTIME(CONCAT(a,'10'))) AS f2 FROM t1; +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `f2` varchar(26) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +SELECT * FROM t2; +f2 +0.000000 +DROP TABLE t1,t2; # # MDEV-4635 Crash in UNIX_TIMESTAMP(STR_TO_DATE('2020','%Y')) # -SET TIME_ZONE='+02:00'; SELECT UNIX_TIMESTAMP(STR_TO_DATE('2020','%Y')); UNIX_TIMESTAMP(STR_TO_DATE('2020','%Y')) NULL @@ -2600,3 +2662,18 @@ SELECT COALESCE(TIME'10:20:30',DATE'2001-01-01'); COALESCE(TIME'10:20:30',DATE'2001-01-01') 2014-04-15 10:20:30 SET timestamp=DEFAULT; +# +# MDEV-5750 Assertion `ltime->year == 0' fails on a query with EXTRACT DAY_MINUTE and TIME column +# +CREATE TABLE t1 ( d DATE, t TIME ); +INSERT INTO t1 VALUES ('2008-12-05','22:34:09'),('2005-03-27','14:26:02'); +SELECT 
EXTRACT(DAY_MINUTE FROM GREATEST(t,d)), GREATEST(t,d) FROM t1; +EXTRACT(DAY_MINUTE FROM GREATEST(t,d)) GREATEST(t,d) +342259 838:59:59 +342259 838:59:59 +Warnings: +Warning 1292 Truncated incorrect time value: '9336:00:00' +Warning 1292 Truncated incorrect time value: '9336:00:00' +Warning 1292 Truncated incorrect time value: '2952:00:00' +Warning 1292 Truncated incorrect time value: '2952:00:00' +DROP TABLE t1; diff --git a/mysql-test/r/gis-debug.result b/mysql-test/r/gis-debug.result new file mode 100644 index 00000000000..8593f434c2b --- /dev/null +++ b/mysql-test/r/gis-debug.result @@ -0,0 +1,294 @@ +SET @tmp=ST_GIS_DEBUG(1); +DROP TABLE IF EXISTS p1; +CREATE PROCEDURE p1(dist DOUBLE, geom TEXT) +BEGIN +DECLARE g GEOMETRY; +SET g=GeomFromText(geom); +SELECT geom AS `-----`; +SELECT dist, GeometryType(@buf:=ST_Buffer(g, dist)) AS `buffer`, ROUND(ST_AREA(@buf),2) AS buf_area; +END| +# +# Testing ST_BUFFER with positive distance +# +----- +POINT(0 0)) +dist buffer buf_area +1 POLYGON 3.14 +----- +LineString(0 1, 1 1)) +dist buffer buf_area +1 POLYGON 5.14 +----- +LineString(9 9,8 1,1 5,0 0) +dist buffer buf_area +1 POLYGON 44.63 +----- +Polygon((2 2,2 8,8 8,8 2,2 2)) +dist buffer buf_area +1 POLYGON 63.14 +----- +Polygon((0 0,0 8,8 8,8 0,0 0),(2 2,6 2,6 6,2 6,2 2)) +dist buffer buf_area +1 POLYGON 95.14 +----- +Polygon((0 0, 0 8, 8 8, 8 10, -10 10, -10 0, 0 0)) +dist buffer buf_area +1 POLYGON 174.93 +----- +MultiPoint(9 9,8 1,1 5) +dist buffer buf_area +1 MULTIPOLYGON 9.42 +----- +MultiLineString((0 0,2 2)) +dist buffer buf_area +1 POLYGON 8.80 +----- +MultiLineString((0 0,2 2,0 4)) +dist buffer buf_area +1 POLYGON 14.24 +----- +MultiLineString((0 0,2 2),(0 2,2 0)) +dist buffer buf_area +1 POLYGON 13.59 +----- +MultiLineString((2 2,2 8,-2 8),(-6 -6, 6 6),(10 10, 14 14)) +dist buffer buf_area +1 MULTIPOLYGON 70.06 +----- +MultiPolygon(((2 2,2 8,8 8,8 2,2 2)), ((9 9,8 1,1 5,9 9))) +dist buffer buf_area +1 POLYGON 73.18 +----- +MultiPolygon(((2 2,2 8,8 8,8 2,2 2), 
(4 4,4 6,6 6,6 4,4 4)),((9 9,8 1,1 5,9 9))) +dist buffer buf_area +1 POLYGON 73.18 +----- +GeometryCollection(Point(0 0)) +dist buffer buf_area +1 POLYGON 3.14 +----- +GeometryCollection(LineString(0 0, 2 2))) +dist buffer buf_area +1 POLYGON 8.80 +----- +GeometryCollection(Polygon((2 2,2 8,8 8,8 2,2 2)))) +dist buffer buf_area +1 POLYGON 63.14 +----- +GeometryCollection(MultiPoint(9 9,8 1,1 5)) +dist buffer buf_area +1 MULTIPOLYGON 9.42 +----- +GeometryCollection(MultiLineString((0 0,0 1),(3 0,3 1))) +dist buffer buf_area +1 MULTIPOLYGON 10.28 +----- +GeometryCollection(MultiPolygon(((0 0, 3 0, 3 3, 0 3, 0 0)),((6 6,6 9,9 9,9 6,6 6)))) +dist buffer buf_area +1 MULTIPOLYGON 48.28 +----- +GeometryCollection(Point(9 9),LineString(1 5,0 0),Polygon((2 2,2 8,8 8,8 2,2 2))) +dist buffer buf_area +1 POLYGON 75.92 +# +# Testing ST_BUFFER with zero distance +# +----- +POINT(0 0)) +dist buffer buf_area +0 POINT 0.00 +----- +LineString(0 1, 1 1)) +dist buffer buf_area +0 LINESTRING 0.00 +----- +LineString(9 9,8 1,1 5,0 0) +dist buffer buf_area +0 LINESTRING 0.00 +----- +Polygon((2 2,2 8,8 8,8 2,2 2)) +dist buffer buf_area +0 POLYGON 36.00 +----- +Polygon((0 0,0 8,8 8,8 0,0 0),(2 2,6 2,6 6,2 6,2 2)) +dist buffer buf_area +0 POLYGON 48.00 +----- +Polygon((0 0, 0 8, 8 8, 8 10, -10 10, -10 0, 0 0)) +dist buffer buf_area +0 POLYGON 116.00 +----- +MultiPoint(9 9,8 1,1 5) +dist buffer buf_area +0 MULTIPOINT NULL +----- +MultiLineString((0 0,2 2)) +dist buffer buf_area +0 MULTILINESTRING NULL +----- +MultiLineString((0 0,2 2,0 4)) +dist buffer buf_area +0 MULTILINESTRING NULL +----- +MultiLineString((0 0,2 2),(0 2,2 0)) +dist buffer buf_area +0 MULTILINESTRING NULL +----- +MultiLineString((2 2,2 8,-2 8),(-6 -6, 6 6),(10 10, 14 14)) +dist buffer buf_area +0 MULTILINESTRING NULL +----- +MultiPolygon(((2 2,2 8,8 8,8 2,2 2)), ((9 9,8 1,1 5,9 9))) +dist buffer buf_area +0 MULTIPOLYGON 66.00 +----- +MultiPolygon(((2 2,2 8,8 8,8 2,2 2), (4 4,4 6,6 6,6 4,4 4)),((9 9,8 1,1 5,9 9))) +dist 
buffer buf_area +0 MULTIPOLYGON 62.00 +----- +GeometryCollection(Point(0 0)) +dist buffer buf_area +0 GEOMETRYCOLLECTION 0.00 +----- +GeometryCollection(LineString(0 0, 2 2))) +dist buffer buf_area +0 GEOMETRYCOLLECTION 0.00 +----- +GeometryCollection(Polygon((2 2,2 8,8 8,8 2,2 2)))) +dist buffer buf_area +0 GEOMETRYCOLLECTION 36.00 +----- +GeometryCollection(MultiPoint(9 9,8 1,1 5)) +dist buffer buf_area +0 GEOMETRYCOLLECTION NULL +----- +GeometryCollection(MultiLineString((0 0,0 1),(3 0,3 1))) +dist buffer buf_area +0 GEOMETRYCOLLECTION NULL +----- +GeometryCollection(MultiPolygon(((0 0, 3 0, 3 3, 0 3, 0 0)),((6 6,6 9,9 9,9 6,6 6)))) +dist buffer buf_area +0 GEOMETRYCOLLECTION 18.00 +----- +GeometryCollection(Point(9 9),LineString(1 5,0 0),Polygon((2 2,2 8,8 8,8 2,2 2))) +dist buffer buf_area +0 GEOMETRYCOLLECTION 36.00 +# +# Testing ST_BUFFER with negative distance +# +----- +POINT(0 0)) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +LineString(0 1, 1 1)) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +LineString(9 9,8 1,1 5,0 0) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +Polygon((2 2,2 8,8 8,8 2,2 2)) +dist buffer buf_area +-1 POLYGON 16.00 +----- +MultiPoint(9 9,8 1,1 5) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +MultiLineString((0 0,2 2)) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +MultiLineString((0 0,2 2,0 4)) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +MultiLineString((0 0,2 2),(0 2,2 0)) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +MultiLineString((2 2,2 8,-2 8),(-6 -6, 6 6),(10 10, 14 14)) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +GeometryCollection(Point(0 0)) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +GeometryCollection(LineString(0 0, 2 2))) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +GeometryCollection(Polygon((2 2,2 8,8 8,8 2,2 2)))) +dist buffer buf_area +-1 POLYGON 16.00 +----- +GeometryCollection(MultiPoint(9 
9,8 1,1 5)) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +GeometryCollection(MultiLineString((0 0,0 1),(3 0,3 1))) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +GeometryCollection(Point(9 9),LineString(1 5,0 0),Polygon((2 2,2 8,8 8,8 2,2 2))) +dist buffer buf_area +-1 POLYGON 16.00 +SELECT ST_CONTAINS( +GeomFromText('MULTIPOLYGON(((0 0, 0 5, 5 5, 5 0, 0 0)),((6 6, 6 11, 11 11, 11 6, 6 6)))'), +GeomFromText('POINT(5 10)')); +ST_CONTAINS( +GeomFromText('MULTIPOLYGON(((0 0, 0 5, 5 5, 5 0, 0 0)),((6 6, 6 11, 11 11, 11 6, 6 6)))'), +GeomFromText('POINT(5 10)')) +0 +SELECT AsText(ST_UNION( +GeomFromText('MULTIPOLYGON(((0 0, 0 5, 5 5, 5 0, 0 0)),((6 6, 6 11, 11 11, 11 6, 6 6)))'), +GeomFromText('POINT(5 10)'))); +AsText(ST_UNION( +GeomFromText('MULTIPOLYGON(((0 0, 0 5, 5 5, 5 0, 0 0)),((6 6, 6 11, 11 11, 11 6, 6 6)))'), +GeomFromText('POINT(5 10)'))) +GEOMETRYCOLLECTION(POLYGON((0 0,0 5,5 5,5 0,0 0)),POLYGON((6 6,6 11,11 11,11 6,6 6)),POINT(5 10)) +DROP PROCEDURE p1; +# +# Bug #13833019 ASSERTION `T1->RESULT_RANGE' FAILED IN GCALC_OPERATION_REDUCER::END_COUPLE +# +SELECT GeometryType(ST_BUFFER(MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((0 0,9 4,3 3,0 0)),((2 2,2 2,8 8,2 3,2 2)))'), 3)); +GeometryType(ST_BUFFER(MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((0 0,9 4,3 3,0 0)),((2 2,2 2,8 8,2 3,2 2)))'), 3)) +POLYGON +# +# Bug #13832749 HANDLE_FATAL_SIGNAL IN GCALC_FUNCTION::COUNT_INTERNAL +# +SELECT GeometryType(ST_BUFFER(MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((3 5,2 5,2 4,3 4,3 5)),((2 2,2 8,8 8,8 2,2 2), (4 4,4 6,6 6,6 4,4 4)), ((9 9,8 1,1 5,9 9)))'),1)); +GeometryType(ST_BUFFER(MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((3 5,2 5,2 4,3 4,3 5)),((2 2,2 8,8 8,8 2,2 2), (4 4,4 6,6 6,6 4,4 4)), ((9 9,8 1,1 5,9 9)))'),1)) +POLYGON +# +# Bug#13358363 - ASSERTION: N > 0 && N < SINUSES_CALCULATED*2+1 | GET_N_SINCOS/ADD_EDGE_BUFFER +# +DO ST_BUFFER(ST_GEOMCOLLFROMTEXT('linestring(1 1,2 2)'),''); +SELECT ST_WITHIN( +LINESTRINGFROMTEXT(' LINESTRING(3 8,9 2,3 8,3 3,7 6,4 7,4 7,8 1) '), 
+ST_BUFFER(MULTIPOLYGONFROMTEXT(' MULTIPOLYGON(((3 5,2 5,2 4,3 4,3 5)),((2 2,2 8,8 8,8 2,2 2),(4 4,4 6,6 6,6 4,4 4)),((0 5,3 5,3 2,1 2,1 1,3 1,3 0,0 0,0 3,2 3,2 4,0 4,0 5))) '), +ST_NUMINTERIORRINGS(POLYGONFROMTEXT('POLYGON((3 5,2 4,2 5,3 5)) ')))); +ST_WITHIN( +LINESTRINGFROMTEXT(' LINESTRING(3 8,9 2,3 8,3 3,7 6,4 7,4 7,8 1) '), +ST_BUFFER(MULTIPOLYGONFROMTEXT(' MULTIPOLYGON(((3 5,2 5,2 4,3 4,3 5)),((2 2,2 8,8 8,8 2,2 2),(4 4,4 6,6 6,6 4,4 4)),((0 5,3 5,3 2,1 2,1 1,3 1,3 0,0 0,0 3,2 3,2 4,0 4,0 5))) ') +0 +SELECT ST_DIMENSION(ST_BUFFER(POLYGONFROMTEXT(' POLYGON((3 5,2 5,2 4,3 4,3 5)) '), +ST_NUMINTERIORRINGS(POLYGONFROMTEXT(' POLYGON((0 0,9 3,4 2,0 0))')))); +ST_DIMENSION(ST_BUFFER(POLYGONFROMTEXT(' POLYGON((3 5,2 5,2 4,3 4,3 5)) '), +ST_NUMINTERIORRINGS(POLYGONFROMTEXT(' POLYGON((0 0,9 3,4 2,0 0))')))) +2 +SELECT ST_NUMINTERIORRINGS( +ST_ENVELOPE(ST_BUFFER(MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((3 5,2 5,2 4,3 4,3 5))) '), +SRID(MULTILINESTRINGFROMTEXT('MULTILINESTRING((2 2,4 2,1 2,2 4,2 2)) '))))); +ST_NUMINTERIORRINGS( +ST_ENVELOPE(ST_BUFFER(MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((3 5,2 5,2 4,3 4,3 5))) '), +SRID(MULTILINESTRINGFROMTEXT('MULTILINESTRING((2 2,4 2,1 2,2 4,2 2)) '))))) +0 +SELECT ASTEXT(ST_BUFFER(POLYGONFROMTEXT(' POLYGON((9 9,5 2,4 5,9 9))'), +SRID(GEOMETRYFROMTEXT(' MULTIPOINT(8 4,5 0,7 8,6 9,3 4,7 3,5 5) ')))); +ASTEXT(ST_BUFFER(POLYGONFROMTEXT(' POLYGON((9 9,5 2,4 5,9 9))'), +SRID(GEOMETRYFROMTEXT(' MULTIPOINT(8 4,5 0,7 8,6 9,3 4,7 3,5 5) ')))) +POLYGON((9 9,5 2,4 5,9 9)) diff --git a/mysql-test/r/gis-precise.result b/mysql-test/r/gis-precise.result index 71eed65b2ea..c0b8b85d216 100644 --- a/mysql-test/r/gis-precise.result +++ b/mysql-test/r/gis-precise.result @@ -156,19 +156,19 @@ POLYGON((1 0,0.950932325672582 0.001204543794827595,0.9019828596704393 0.0048152 create table t1(geom geometrycollection); insert into t1 values (geomfromtext('POLYGON((0 0, 10 10, 0 8, 0 0))')); insert into t1 values (geomfromtext('POLYGON((1 1, 10 10, 0 8, 1 1))')); 
-select astext(geom), area(geom),area(ST_buffer(geom,2)) from t1; -astext(geom) area(geom) area(ST_buffer(geom,2)) -POLYGON((0 0,10 10,0 8,0 0)) 40 117.2416763959153 -POLYGON((1 1,10 10,0 8,1 1)) 36 108.55539589266459 -select astext(ST_buffer(geom,2)) from t1; -astext(ST_buffer(geom,2)) -POLYGON((0 -2,-0.09813534865483604 -1.9975909124103448,-0.1960342806591212 -1.9903694533443936,-0.2934609489107236 -1.978353019929562,-0.3901806440322566 -1.9615705608064609,-0.4859603598065278 -1.940062506389088,-0.5805693545089246 -1.9138806714644179,-0.6737797067844402 -1.8830881303660416,-0.7653668647301796 -1.8477590650225735,-0.8551101868605642 -1.8079785862468867,-0.9427934736519952 -1.7638425286967099,-1.0282054883864433 -1.7154572200005442,-1.1111404660392044 -1.6629392246050905,-1.1913986089848667 -1.6064150629612897,-1.268786568327291 -1.546020906725474,-1.3431179096940367 -1.4819022507099182,-1.414213562373095 -1.414213562373095,-1.4819022507099182 -1.3431179096940367,-1.546020906725474 -1.268786568327291,-1.6064150629612897 -1.1913986089848667,-1.6629392246050905 -1.1111404660392044,-1.7154572200005442 -1.0282054883864433,-1.7638425286967099 -0.9427934736519952,-1.8079785862468867 -0.8551101868605642,-1.8477590650225735 -0.7653668647301796,-1.8830881303660416 -0.6737797067844402,-1.9138806714644179 -0.5805693545089246,-1.940062506389088 -0.4859603598065278,-1.9615705608064609 -0.3901806440322566,-1.978353019929562 -0.2934609489107236,-1.9903694533443936 -0.1960342806591212,-1.9975909124103448 -0.09813534865483604,-2 0,-2 8,-1.9976924709932495 8.096045777298562,-1.9905734200023315 8.193952209526529,-1.978658903288988 8.291391393893539,-1.9619776239675701 8.388128590869789,-1.940569768701071 8.483930752074583,-1.9144869108879337 8.578567081710304,-1.8837918864172196 8.67180959256969,-1.848558642291444 8.763433655277009,-1.8088720584817741 8.853218539439872,-1.7648277434447421 8.940947945408109,-1.716531803793098 9.02641052535855,-1.6641005886756872 
9.109400392450459,-1.6076604094821603 9.189717616824955,-1.5473472355477698 9.267168707253568,-1.483306366591334 9.341567077275533,-1.415692082675486 9.412733494700944,-1.3446672725324915 9.480496513396783,-1.2704030411510234 9.54469288631567,-1.1930782975692584 9.605167958772302,-1.1128793238673322 9.661776041020147,-1.0299993263974934 9.714380759230782,-0.9446379703330767 9.762855384030411,-0.8570008986576141 9.807083135802014,-0.7672992367528719 9.846957466017683,-0.6757490837793165 9.882382313923348,-0.5825709920743072 9.913272337957553,-0.4879894358221987 9.939553121346753,-0.3922322702763681 9.96116135138184,9.607767729723632 11.96116135138184,9.609819355967744 11.96157056080646,9.706539051089276 11.978353019929562,9.803965719340878 11.990369453344393,9.901864651345164 11.997590912410345,10 12,10.098135348654836 11.997590912410345,10.196034280659122 11.990369453344393,10.293460948910724 11.978353019929562,10.390180644032256 11.96157056080646,10.485960359806528 11.940062506389088,10.580569354508924 11.913880671464417,10.67377970678444 11.88308813036604,10.76536686473018 11.847759065022574,10.855110186860564 11.807978586246886,10.942793473651996 11.76384252869671,11.028205488386444 11.715457220000545,11.111140466039204 11.66293922460509,11.191398608984866 11.60641506296129,11.268786568327291 11.546020906725474,11.343117909694037 11.481902250709918,11.414213562373096 11.414213562373096,11.481902250709918 11.343117909694037,11.546020906725474 11.268786568327291,11.60641506296129 11.191398608984866,11.66293922460509 11.111140466039204,11.715457220000545 11.028205488386444,11.76384252869671 10.942793473651996,11.807978586246886 10.855110186860564,11.847759065022574 10.76536686473018,11.88308813036604 10.67377970678444,11.913880671464417 10.580569354508924,11.940062506389088 10.485960359806528,11.96157056080646 10.390180644032256,11.978353019929562 10.293460948910724,11.990369453344393 10.196034280659122,11.997590912410345 10.098135348654836,12 10,11.997590912410345 
9.901864651345164,11.990369453344393 9.803965719340878,11.978353019929562 9.706539051089276,11.96157056080646 9.609819355967744,11.940062506389088 9.514039640193472,11.913880671464417 9.419430645491076,11.88308813036604 9.32622029321556,11.847759065022574 9.23463313526982,11.807978586246886 9.144889813139436,11.76384252869671 9.057206526348004,11.715457220000545 8.971794511613556,11.66293922460509 8.888859533960796,11.60641506296129 8.808601391015134,11.546020906725474 8.731213431672709,11.481902250709918 8.656882090305963,11.414213562373096 8.585786437626904,1.414213562373095 -1.414213562373095,1.3431179096940367 -1.4819022507099182,1.268786568327291 -1.546020906725474,1.1913986089848667 -1.6064150629612897,1.1111404660392044 -1.6629392246050905,1.0282054883864433 -1.7154572200005442,0.9427934736519952 -1.7638425286967099,0.8551101868605642 -1.8079785862468867,0.7653668647301796 -1.8477590650225735,0.6737797067844402 -1.8830881303660416,0.5805693545089246 -1.9138806714644179,0.4859603598065278 -1.940062506389088,0.3901806440322566 -1.9615705608064609,0.2934609489107236 -1.978353019929562,0.1960342806591212 -1.9903694533443936,0.09813534865483604 -1.9975909124103448,0 -2)) -POLYGON((0.9892698494111194 -0.9999712157599518,0.8911488380683092 -0.9970356593075951,0.7932900587088283 -0.9892890690323013,0.6959292617035704 -0.9767501071485654,0.5993009977403192 -0.959448981113848,0.5036380527705995 -0.9374273708561667,0.40917088720792716 -0.9107383283634973,0.3161270807284893 -0.8794461498768888,0.2247307840117696 -0.843626220995187,0.13520217874192864 -0.8033648350645226,0.04775694717084156 -0.7587589852900836,-0.03739424747933939 -0.7099161310709878,-0.12004626852233802 -0.6569539391211774,-0.19999999999999996 -0.5999999999999999,-0.27706282637007584 -0.5391915207353741,-0.35104909653393324 -0.47467499428004234,-0.42178057108631606 -0.40660584659721555,-0.4890868517096818 -0.3351480622258147,-0.5528057916786753 -0.26047378922735365,-0.6127838864857904 
-0.18276292446617926,-0.6688766436471771 -0.10220268022216916,-0.7209489307976877 -0.018987133179951154,-0.7688753012365837 0.06668324311882912,-0.8125402961396226 0.15460206123382925,-0.8518387227094812 0.2445575170314307,-0.8866759075944177 0.33633289993945015,-0.9169679249646674 0.4297071150218881,-0.9426417986971172 0.5244552156159955,-0.9636356781811806 0.6203489452484875,-0.9798989873223332 0.717157287525381,-1.9798989873223332 7.717157287525381,-1.990163308912474 7.8018838627003015,-1.9974871681520578 7.899775187364235,-1.9999989058443504 7.997907962380466,-1.9976924709932495 8.096045777298562,-1.9905734200023315 8.193952209526529,-1.978658903288988 8.291391393893539,-1.9619776239675701 8.388128590869789,-1.940569768701071 8.483930752074583,-1.9144869108879337 8.578567081710304,-1.8837918864172196 8.67180959256969,-1.848558642291444 8.763433655277009,-1.8088720584817741 8.853218539439872,-1.7648277434447421 8.940947945408109,-1.716531803793098 9.02641052535855,-1.6641005886756872 9.109400392450459,-1.6076604094821603 9.189717616824955,-1.5473472355477698 9.267168707253568,-1.483306366591334 9.341567077275533,-1.415692082675486 9.412733494700944,-1.3446672725324915 9.480496513396783,-1.2704030411510234 9.54469288631567,-1.1930782975692584 9.605167958772302,-1.1128793238673322 9.661776041020147,-1.0299993263974934 9.714380759230782,-0.9446379703330767 9.762855384030411,-0.8570008986576141 9.807083135802014,-0.7672992367528719 9.846957466017683,-0.6757490837793165 9.882382313923348,-0.5825709920743072 9.913272337957553,-0.4879894358221987 9.939553121346753,-0.3922322702763681 9.96116135138184,9.607767729723632 11.96116135138184,9.609819355967744 11.96157056080646,9.706539051089276 11.978353019929562,9.803965719340878 11.990369453344393,9.901864651345164 11.997590912410345,10 12,10.098135348654836 11.997590912410345,10.196034280659122 11.990369453344393,10.293460948910724 11.978353019929562,10.390180644032256 11.96157056080646,10.485960359806528 
11.940062506389088,10.580569354508924 11.913880671464417,10.67377970678444 11.88308813036604,10.76536686473018 11.847759065022574,10.855110186860564 11.807978586246886,10.942793473651996 11.76384252869671,11.028205488386444 11.715457220000545,11.111140466039204 11.66293922460509,11.191398608984866 11.60641506296129,11.268786568327291 11.546020906725474,11.343117909694037 11.481902250709918,11.414213562373096 11.414213562373096,11.481902250709918 11.343117909694037,11.546020906725474 11.268786568327291,11.60641506296129 11.191398608984866,11.66293922460509 11.111140466039204,11.715457220000545 11.028205488386444,11.76384252869671 10.942793473651996,11.807978586246886 10.855110186860564,11.847759065022574 10.76536686473018,11.88308813036604 10.67377970678444,11.913880671464417 10.580569354508924,11.940062506389088 10.485960359806528,11.96157056080646 10.390180644032256,11.978353019929562 10.293460948910724,11.990369453344393 10.196034280659122,11.997590912410345 10.098135348654836,12 10,11.997590912410345 9.901864651345164,11.990369453344393 9.803965719340878,11.978353019929562 9.706539051089276,11.96157056080646 9.609819355967744,11.940062506389088 9.514039640193472,11.913880671464417 9.419430645491076,11.88308813036604 9.32622029321556,11.847759065022574 9.23463313526982,11.807978586246886 9.144889813139436,11.76384252869671 9.057206526348004,11.715457220000545 8.971794511613556,11.66293922460509 8.888859533960796,11.60641506296129 8.808601391015134,11.546020906725474 8.731213431672709,11.481902250709918 8.656882090305963,11.414213562373096 8.585786437626904,2.414213562373095 -0.4142135623730949,2.4066058465972153 -0.42178057108631606,2.335148062225815 -0.4890868517096818,2.260473789227354 -0.5528057916786753,2.1827629244661795 -0.6127838864857904,2.1022026802221694 -0.6688766436471771,2.018987133179951 -0.7209489307976877,1.9333167568811709 -0.7688753012365837,1.8453979387661708 -0.8125402961396226,1.7554424829685693 -0.8518387227094812,1.6636671000605499 
-0.8866759075944177,1.570292884978112 -0.9169679249646674,1.4755447843840046 -0.9426417986971172,1.3796510547515126 -0.9636356781811806,1.282842712474619 -0.9798989873223332,1.1853529773292786 -0.9913925463843567,1.0874167106265484 -0.9980886663767536,0.9892698494111194 -0.9999712157599518)) +select astext(geom), area(geom),round(area(ST_buffer(geom,2)), 7) from t1; +astext(geom) area(geom) round(area(ST_buffer(geom,2)), 7) +POLYGON((0 0,10 10,0 8,0 0)) 40 117.2416764 +POLYGON((1 1,10 10,0 8,1 1)) 36 108.5553959 +select ST_NUMPOINTS(ST_EXTERIORRING(ST_buffer(geom,2))) from t1; +ST_NUMPOINTS(ST_EXTERIORRING(ST_buffer(geom,2))) +133 +134 set @geom=geomfromtext('LINESTRING(2 1, 4 2, 2 3, 2 5)'); set @buff=ST_buffer(@geom,1); -select astext(@buff); -astext(@buff) -POLYGON((2.0218594008566466 0.00023894525032219782,1.9727771204112932 0.00037061126290494073,1.9237604222673113 0.002910472030148492,1.8749273919438858 0.0078524088049996,1.8263956724883341 0.015184516028905026,1.7782821810637013 0.024889130013345362,1.7307028272850733 0.03694287149320841,1.683772233983162 0.05131670194948634,1.6376034610678665 0.06797599356561079,1.592307733157046 0.08688061264889702,1.5479941716266756 0.10798501631612445,1.504769531727891 0.13123836221033125,1.46273794540424 0.1565846309845056,1.4220006704287085 0.18396276125709976,1.382655846464876 0.21330679671424568,1.3447982586398712 0.24454604500429356,1.3085191091986976 0.2776052480418776,1.2739057977900368 0.3124047633112361,1.241041710912841 0.34886075573200737,1.2100060210309511 0.38688539962528223,1.1808734958396978 0.4263870902933562,1.1537143181439746 0.46727066470347056,1.1285939167817136 0.5094376307438929,1.1055728090000843 0.5527864045000421,1.0847064546641425 0.5972125549790352,1.0660451226491614 0.6426090556930975,1.0496337697385036 0.6888665424957445,1.0355119323187965 0.7358735770495916,1.0237136311333106 0.7835169152910685,1.0142672893230111 0.8316817802452878,1.0071956639527206 0.8802521385338314,1.0025157911873577 
0.9291109799093207,1.0002389452503222 0.9781405991433534,1.000370611262905 1.0272228795887068,1.0029104720301485 1.0762395777326887,1.0078524088049996 1.1250726080561142,1.015184516028905 1.1736043275116659,1.0248891300133454 1.2217178189362987,1.0369428714932085 1.2692971727149267,1.0513167019494865 1.316227766016838,1.0679759935656108 1.3623965389321335,1.086880612648897 1.407692266842954,1.1079850163161244 1.4520058283733244,1.1312383622103312 1.495230468272109,1.1565846309845056 1.53726205459576,1.1839627612570998 1.5779993295712915,1.2133067967142457 1.617344153535124,1.2445460450042936 1.6552017413601288,1.2776052480418776 1.6914808908013024,1.3124047633112361 1.7260942022099632,1.3488607557320074 1.758958289087159,1.3868853996252821 1.7899939789690489,1.4263870902933562 1.8191265041603022,1.4672706647034706 1.8462856818560254,1.5094376307438928 1.8714060832182864,1.5527864045000421 1.8944271909999157,1.7639320225002106 2,1.5527864045000421 2.1055728090000843,1.5286032631740025 2.118078735651645,1.4858972558067784 2.1422713899997277,1.4444297669803978 2.1685303876974547,1.4043006955075668 2.196792468519355,1.3656067158363545 2.226989546637263,1.3284410451529816 2.259048874645041,1.2928932188134525 2.2928932188134525,1.2590488746450408 2.3284410451529816,1.2269895466372631 2.3656067158363543,1.1967924685193552 2.4043006955075668,1.1685303876974547 2.444429766980398,1.1422713899997279 2.4858972558067784,1.118078735651645 2.5286032631740025,1.0960107068765566 2.572444906569718,1.0761204674887133 2.6173165676349104,1.0584559348169793 2.66311014660778,1.043059664267791 2.709715322745538,1.029968746805456 2.757019820096736,1.0192147195967696 2.8049096779838716,1.0108234900352189 2.853269525544638,1.0048152733278033 2.9019828596704396,1.0012045437948276 2.950932325672582,1 3,1 5,1.0048152733278033 5.098017140329561,1.0108234900352189 5.146730474455362,1.0192147195967696 5.195090322016128,1.029968746805456 5.242980179903264,1.043059664267791 
5.290284677254462,1.0584559348169793 5.33688985339222,1.0761204674887133 5.38268343236509,1.0960107068765566 5.427555093430282,1.118078735651645 5.471396736825998,1.1422713899997279 5.514102744193222,1.1685303876974547 5.555570233019602,1.1967924685193552 5.595699304492434,1.2269895466372631 5.634393284163646,1.2590488746450408 5.671558954847018,1.2928932188134525 5.707106781186548,1.3284410451529816 5.740951125354959,1.3656067158363545 5.773010453362737,1.4043006955075668 5.803207531480645,1.4444297669803978 5.831469612302545,1.4858972558067784 5.857728610000272,1.5286032631740025 5.881921264348355,1.572444906569718 5.903989293123443,1.6173165676349102 5.923879532511287,1.6631101466077798 5.941544065183021,1.7097153227455377 5.956940335732209,1.7570198200967362 5.970031253194544,1.8049096779838716 5.98078528040323,1.853269525544638 5.989176509964781,1.9019828596704393 5.995184726672197,1.950932325672582 5.998795456205173,2 6,2.049067674327418 5.998795456205173,2.0980171403295604 5.995184726672197,2.146730474455362 5.989176509964781,2.1950903220161284 5.98078528040323,2.242980179903264 5.970031253194544,2.290284677254462 5.956940335732209,2.33688985339222 5.941544065183021,2.3826834323650896 5.923879532511287,2.427555093430282 5.903989293123443,2.4713967368259975 5.881921264348355,2.5141027441932216 5.857728610000272,2.555570233019602 5.831469612302545,2.5956993044924332 5.803207531480645,2.6343932841636457 5.773010453362737,2.6715589548470184 5.740951125354959,2.7071067811865475 5.707106781186548,2.740951125354959 5.671558954847018,2.773010453362737 5.634393284163646,2.803207531480645 5.595699304492434,2.8314696123025453 5.555570233019602,2.8577286100002723 5.514102744193222,2.881921264348355 5.471396736825998,2.9039892931234434 5.427555093430282,2.923879532511287 5.38268343236509,2.9415440651830207 5.33688985339222,2.956940335732209 5.290284677254462,2.970031253194544 5.242980179903264,2.9807852804032304 5.195090322016128,2.989176509964781 
5.146730474455362,2.9951847266721967 5.098017140329561,2.9987954562051726 5.049067674327418,3 5,3 3.618033988749895,4.447213595499958 2.8944271909999157,4.452005828373324 2.8920149836838753,4.4952304682721085 2.8687616377896688,4.53726205459576 2.8434153690154944,4.577999329571291 2.8160372387429002,4.617344153535124 2.786693203285754,4.655201741360129 2.7554539549957067,4.691480890801302 2.7223947519581224,4.726094202209963 2.6875952366887637,4.758958289087159 2.6511392442679926,4.789993978969049 2.613114600374718,4.819126504160303 2.573612909706644,4.846285681856025 2.5327293352965294,4.871406083218286 2.490562369256107,4.894427190999916 2.447213595499958,4.9152935453358575 2.402787445020965,4.933954877350839 2.3573909443069025,4.950366230261497 2.3111334575042557,4.964488067681204 2.2641264229504086,4.976286368866689 2.2164830847089316,4.985732710676989 2.1683182197547124,4.992804336047279 2.1197478614661684,4.997484208812643 2.070889020090679,4.999761054749678 2.0218594008566466,4.999629388737095 1.9727771204112932,4.997089527969852 1.9237604222673113,4.992147591195001 1.8749273919438858,4.984815483971095 1.8263956724883341,4.975110869986654 1.7782821810637013,4.963057128506792 1.7307028272850733,4.948683298050514 1.683772233983162,4.932024006434389 1.6376034610678665,4.913119387351103 1.592307733157046,4.892014983683875 1.5479941716266756,4.868761637789669 1.504769531727891,4.843415369015494 1.46273794540424,4.816037238742901 1.4220006704287085,4.786693203285754 1.382655846464876,4.755453954995707 1.3447982586398712,4.722394751958122 1.3085191091986976,4.687595236688764 1.2739057977900368,4.651139244267993 1.241041710912841,4.613114600374717 1.2100060210309511,4.573612909706644 1.1808734958396978,4.53272933529653 1.1537143181439746,4.490562369256107 1.1285939167817136,4.447213595499958 1.1055728090000843,2.447213595499958 0.10557280900008414,2.3573909443069025 0.06604512264916129,2.3111334575042557 0.04963376973850353,2.2641264229504086 
0.03551193231879646,2.2164830847089316 0.023713631133310598,2.1683182197547124 0.014267289323011023,2.1197478614661684 0.007195663952720532,2.070889020090679 0.0025157911873575634,2.0218594008566466 0.00023894525032219782)) +select ST_NUMPOINTS(ST_EXTERIORRING(@buff)); +ST_NUMPOINTS(ST_EXTERIORRING(@buff)) +202 DROP TABLE t1; select st_touches(geomfromtext('point(0 0)'), geomfromtext('point(1 1)')); st_touches(geomfromtext('point(0 0)'), geomfromtext('point(1 1)')) @@ -200,6 +200,31 @@ result SELECT ST_Equals(PointFromText('POINT (12 13)'),PointFromText('POINT (12 13)')) as result; result 1 +# +# BUG#11755628/47429: INTERSECTION FUNCTION CRASHED MYSQLD +# BUG#11759650/51979: UNION/INTERSECTION OF POLYGONS CRASHES MYSQL +# +SELECT ASTEXT(ST_UNION(GEOMFROMTEXT('POLYGON((525000 183300,525400 +183300,525400 18370, 525000 183700,525000 183300))'), +geomfromtext('POLYGON((525298.67 183511.53,525296.57 +183510.39,525296.42 183510.31,525289.11 183506.62,525283.17 +183503.47,525280.98 183502.26,525278.63 183500.97,525278.39 +183500.84,525276.79 183500,525260.7 183491.55,525263.95 +183484.75,525265.58 183481.95,525278.97 183488.73,525276.5 +183493.45,525275.5 183495.7,525280.35 183498.2,525282.3 +183499.1,525282.2 183499.3,525283.55 183500,525301.75 +183509.35,525304.45 183504.25,525307.85 183504.95,525304.5 +183510.83,525302.81 183513.8,525298.67 183511.53),(525275.06 +183489.89,525272.06 183488.37,525268.94 183494.51,525271.94 +183496.03,525275.06 183489.89),(525263.26 183491.55,525266.15 +183493.04,525269.88 183485.82,525266.99 183484.33,525263.26 +183491.55))'))) st_u; +st_u +MULTIPOLYGON(((525400 18370,525000.9677614468 183300,525400 183300,525400 18370)),((525000 183300,525000 183700,525000.9677614468 183300,525000 183300)),((525265.58 183481.95,525263.95 183484.75,525260.7 183491.55,525276.79 183500,525278.39 183500.84,525278.63 183500.97,525280.98 183502.26,525283.17 183503.47,525289.11 183506.62,525296.42 183510.31,525296.57 183510.39,525298.67 183511.53,525302.81 
183513.8,525304.5 183510.83,525307.85 183504.95,525304.45 183504.25,525301.75 183509.35,525283.55 183500,525282.2 183499.3,525282.3 183499.1,525280.35 183498.2,525275.5 183495.7,525276.5 183493.45,525278.97 183488.73,525265.58 183481.95),(525266.99 183484.33,525263.26 183491.55,525266.15 183493.04,525269.88 183485.82,525266.99 183484.33),(525272.06 183488.37,525268.94 183494.51,525271.94 183496.03,525275.06 183489.89,525272.06 183488.37))) +SET @a=0x0000000001030000000200000005000000000000000000000000000000000000000000000000002440000000000000000000000000000024400000000000002440000000000000000000000000000024400000000000000000000000000000000000000000000000000000F03F000000000000F03F0000000000000040000000000000F03F00000000000000400000000000000040000000000000F03F0000000000000040000000000000F03F000000000000F03F; +SELECT ASTEXT(TOUCHES(@a, GEOMFROMTEXT('point(0 0)'))) t; +t +NULL SELECT astext(ST_UNION ( PolyFromText('POLYGON(( 2 2 ,3 2,2 7,2 2),( 0 0,8 2,1 9,0 0))'), ExteriorRing( Envelope( MultiLineStringFromText('MULTILINESTRING((3 4,5 3),(3 0,0 5))'))))); @@ -230,15 +255,15 @@ MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((2 2,2 8,8 8,8 2,2 2),(4 4,4 6,6 6,6 4,4 4)) ((2 2,5 2,4 4,2 8,2 2)))'), MULTIPOLY POLYGON((0 2,1 4,1 3,2 3,2 4,1 4,1.5 5,2 5,2 8,8 8,8 2,0 2),(4 4,4 6,6 6,6 4,4 4)) -SELECT ASTEXT(ST_UNION( +SELECT ROUND(ST_LENGTH(ST_UNION( MULTILINESTRINGFROMTEXT('MULTILINESTRING((6 2,4 0,3 5,3 6,4 3,6 4,3 9,0 7,3 7,8 4,2 9,5 0), (8 2,1 3,9 0,4 4))'), -MULTILINESTRINGFROMTEXT('MULTILINESTRING((2 5,6 7,9 7,5 2,1 6,3 6))'))); -ASTEXT(ST_UNION( +MULTILINESTRINGFROMTEXT('MULTILINESTRING((2 5,6 7,9 7,5 2,1 6,3 6))'))), 7); +ROUND(ST_LENGTH(ST_UNION( MULTILINESTRINGFROMTEXT('MULTILINESTRING((6 2,4 0,3 5,3 6,4 3,6 4,3 9,0 7,3 7,8 4,2 9,5 0), (8 2,1 3,9 0,4 4))'), -MULTILINESTRINGFROMTEXT('MULTILINESTRING((2 5,6 7,9 7,5 2,1 6,3 6))'))) -MULTILINESTRING((3.5945945945945947 2.027027027027027,4 0,4.75 0.75),(5 0,4.75 0.75),(5.363636363636363 1.3636363636363635,9 0,6.173913043478262 
2.260869565217391),(4.75 0.75,4.428571428571429 1.7142857142857142),(4.75 0.75,5.363636363636363 1.3636363636363635),(5.363636363636363 1.3636363636363635,4.428571428571429 1.7142857142857142),(5.363636363636363 1.3636363636363635,6 2),(4.428571428571429 1.7142857142857142,3.5945945945945947 2.027027027027027),(4.428571428571429 1.7142857142857142,4.15 2.55),(4.5 2.5,5 2,5.3076923076923075 2.3846153846153846),(8 2,6.173913043478262 2.260869565217391),(3.5945945945945947 2.027027027027027,1 3,3.4705882352941178 2.6470588235294117),(3.5945945945945947 2.027027027027027,3.4705882352941178 2.6470588235294117),(6.173913043478262 2.260869565217391,5.3076923076923075 2.3846153846153846),(6.173913043478262 2.260869565217391,5.585365853658536 2.7317073170731705),(5.3076923076923075 2.3846153846153846,4.5 2.5),(5.3076923076923075 2.3846153846153846,5.585365853658536 2.7317073170731705),(4.5 2.5,4.15 2.55),(4.5 2.5,4 3),(4.15 2.55,3.4705882352941178 2.6470588235294117),(4.15 2.55,4 3),(3.4705882352941178 2.6470588235294117,3.25 3.75),(5.585365853658536 2.7317073170731705,4.769230769230769 3.3846153846153846),(5.585365853658536 2.7317073170731705,7.054054054054054 4.5675675675675675),(4 3,3.25 3.75),(4 3,3.142857142857143 5.571428571428571),(4 3,4.769230769230769 3.3846153846153846),(4.769230769230769 3.3846153846153846,4 4),(4.769230769230769 3.3846153846153846,6 4,4.875 5.875),(3.25 3.75,2 5),(3.25 3.75,3 5,3 5.5),(7.054054054054054 4.5675675675675675,8 4,7.16 4.7),(7.054054054054054 4.5675675675675675,4.875 5.875),(7.054054054054054 4.5675675675675675,7.16 4.7),(7.16 4.7,5 6.5),(7.16 4.7,9 7,6 7,5 6.5),(2 5,1 6,3 6),(2 5,3 5.5),(3 5.5,3 6),(3 5.5,3.142857142857143 5.571428571428571),(3.142857142857143 5.571428571428571,3 6),(3.142857142857143 5.571428571428571,4.363636363636363 6.181818181818182),(4.875 5.875,4.363636363636363 6.181818181818182),(4.875 5.875,4.615384615384615 6.3076923076923075),(3 6,2.6666666666666665 7),(4.363636363636363 6.181818181818182,3 
7,2.6666666666666665 7),(4.363636363636363 6.181818181818182,4.615384615384615 6.3076923076923075),(4.615384615384615 6.3076923076923075,4 7.333333333333333),(4.615384615384615 6.3076923076923075,5 6.5),(5 6.5,4 7.333333333333333),(2.1818181818181817 8.454545454545455,0 7,2.6666666666666665 7),(2.6666666666666665 7,2.1818181818181817 8.454545454545455),(4 7.333333333333333,2.444444444444444 8.62962962962963),(4 7.333333333333333,3 9,2.444444444444444 8.62962962962963),(2.1818181818181817 8.454545454545455,2 9,2.444444444444444 8.62962962962963),(2.1818181818181817 8.454545454545455,2.444444444444444 8.62962962962963)) +MULTILINESTRINGFROMTEXT('MULTILINESTRING((2 5,6 7,9 7,5 2,1 6,3 6) +90.2783626 SELECT ST_NUMGEOMETRIES((ST_UNION(ST_UNION( MULTILINESTRINGFROMTEXT('MULTILINESTRING((2 0,4 2,0 2,1 5,0 3,7 0,8 5,5 8), (6 2,4 0,3 5,3 6,4 3,6 4,3 9,0 7,3 7,8 4,2 9,5 0), @@ -434,9 +459,6 @@ ST_WITHIN( MULTIPOINTFROMTEXT(' MULTIPOINT( 2 9 , 2 9 , 4 9 , 9 1 ) ') , POLYGON SELECT ST_INTERSECTS( GeomFromText('MULTILINESTRING( ( 4030 3045 , 3149 2461 , 3004 3831 , 3775 2976 ) )') , GeomFromText('LINESTRING(3058.41 3187.91,3081.52 3153.19,3042.99 3127.57,3019.89 3162.29,3039.07 3175.05,3039.07 3175.05,3058.41 3187.91,3081.52 3153.19,3042.99 3127.57,3019.89 3162.29)') ); ST_INTERSECTS( GeomFromText('MULTILINESTRING( ( 4030 3045 , 3149 2461 , 3004 3831 , 3775 2976 ) )') , GeomFromText('LINESTRING(3058.41 3187.91,3081.52 3153.19,3042.99 3127.57,3019.89 3162.29,3039.07 3175.05,3039.07 3175.05,3058.41 3187.91,3081.52 3153.19, 1 -select ASTEXT(ST_BUFFER(ST_GEOMCOLLFROMTEXT(' GEOMETRYCOLLECTION(LINESTRING(100 100, 31 10, 77 80), POLYGON((0 0,4 7,1 1,0 0)), POINT(20 20))'), -3)); -ASTEXT(ST_BUFFER(ST_GEOMCOLLFROMTEXT(' GEOMETRYCOLLECTION(LINESTRING(100 100, 31 10, 77 80), POLYGON((0 0,4 7,1 1,0 0)), POINT(20 20))'), -3)) -POLYGON((3.999999999999999 6.999999999999998,4 7,3.999999999999999 6.999999999999998)) SELECT ST_NUMPOINTS(ST_EXTERIORRING(ST_BUFFER( POLYGONFROMTEXT( 'POLYGON( ( 0.0 
-3.0, -2.910427500435995 0.727606875108998, -0.910427500435995 8.727606875108998, @@ -455,3 +477,296 @@ ST_NUMPOINTS(ST_EXTERIORRING(ST_BUFFER( POLYGONFROMTEXT( 'POLYGON( ( 0.0 -3.0, select astext(buffer(st_linestringfromwkb(linestring(point(-1,1), point(-1,-2))),-1)); astext(buffer(st_linestringfromwkb(linestring(point(-1,1), point(-1,-2))),-1)) GEOMETRYCOLLECTION EMPTY +DROP TABLE IF EXISTS p1; +CREATE PROCEDURE p1(dist DOUBLE, geom TEXT) +BEGIN +DECLARE g GEOMETRY; +SET g=GeomFromText(geom); +SELECT geom AS `-----`; +SELECT dist, GeometryType(@buf:=ST_Buffer(g, dist)) AS `buffer`, ROUND(ST_AREA(@buf),2) AS buf_area; +END| +# +# Testing ST_BUFFER with positive distance +# +----- +POINT(0 0)) +dist buffer buf_area +1 POLYGON 3.14 +----- +LineString(0 1, 1 1)) +dist buffer buf_area +1 POLYGON 5.14 +----- +LineString(9 9,8 1,1 5,0 0) +dist buffer buf_area +1 POLYGON 44.63 +----- +Polygon((2 2,2 8,8 8,8 2,2 2)) +dist buffer buf_area +1 POLYGON 63.14 +----- +Polygon((0 0,0 8,8 8,8 0,0 0),(2 2,6 2,6 6,2 6,2 2)) +dist buffer buf_area +1 POLYGON 95.14 +----- +Polygon((0 0, 0 8, 8 8, 8 10, -10 10, -10 0, 0 0)) +dist buffer buf_area +1 POLYGON 174.93 +----- +MultiPoint(9 9,8 1,1 5) +dist buffer buf_area +1 MULTIPOLYGON 9.42 +----- +MultiLineString((0 0,2 2)) +dist buffer buf_area +1 POLYGON 8.80 +----- +MultiLineString((0 0,2 2,0 4)) +dist buffer buf_area +1 POLYGON 14.24 +----- +MultiLineString((0 0,2 2),(0 2,2 0)) +dist buffer buf_area +1 POLYGON 13.59 +----- +MultiLineString((2 2,2 8,-2 8),(-6 -6, 6 6),(10 10, 14 14)) +dist buffer buf_area +1 MULTIPOLYGON 70.06 +----- +MultiPolygon(((2 2,2 8,8 8,8 2,2 2)), ((9 9,8 1,1 5,9 9))) +dist buffer buf_area +1 POLYGON 73.18 +----- +MultiPolygon(((2 2,2 8,8 8,8 2,2 2), (4 4,4 6,6 6,6 4,4 4)),((9 9,8 1,1 5,9 9))) +dist buffer buf_area +1 POLYGON 73.18 +----- +GeometryCollection(Point(0 0)) +dist buffer buf_area +1 POLYGON 3.14 +----- +GeometryCollection(LineString(0 0, 2 2))) +dist buffer buf_area +1 POLYGON 8.80 +----- 
+GeometryCollection(Polygon((2 2,2 8,8 8,8 2,2 2)))) +dist buffer buf_area +1 POLYGON 63.14 +----- +GeometryCollection(MultiPoint(9 9,8 1,1 5)) +dist buffer buf_area +1 MULTIPOLYGON 9.42 +----- +GeometryCollection(MultiLineString((0 0,0 1),(3 0,3 1))) +dist buffer buf_area +1 MULTIPOLYGON 10.28 +----- +GeometryCollection(MultiPolygon(((0 0, 3 0, 3 3, 0 3, 0 0)),((6 6,6 9,9 9,9 6,6 6)))) +dist buffer buf_area +1 MULTIPOLYGON 48.28 +----- +GeometryCollection(Point(9 9),LineString(1 5,0 0),Polygon((2 2,2 8,8 8,8 2,2 2))) +dist buffer buf_area +1 POLYGON 75.92 +# +# Testing ST_BUFFER with zero distance +# +----- +POINT(0 0)) +dist buffer buf_area +0 POINT 0.00 +----- +LineString(0 1, 1 1)) +dist buffer buf_area +0 LINESTRING 0.00 +----- +LineString(9 9,8 1,1 5,0 0) +dist buffer buf_area +0 LINESTRING 0.00 +----- +Polygon((2 2,2 8,8 8,8 2,2 2)) +dist buffer buf_area +0 POLYGON 36.00 +----- +Polygon((0 0,0 8,8 8,8 0,0 0),(2 2,6 2,6 6,2 6,2 2)) +dist buffer buf_area +0 POLYGON 48.00 +----- +Polygon((0 0, 0 8, 8 8, 8 10, -10 10, -10 0, 0 0)) +dist buffer buf_area +0 POLYGON 116.00 +----- +MultiPoint(9 9,8 1,1 5) +dist buffer buf_area +0 MULTIPOINT NULL +----- +MultiLineString((0 0,2 2)) +dist buffer buf_area +0 MULTILINESTRING NULL +----- +MultiLineString((0 0,2 2,0 4)) +dist buffer buf_area +0 MULTILINESTRING NULL +----- +MultiLineString((0 0,2 2),(0 2,2 0)) +dist buffer buf_area +0 MULTILINESTRING NULL +----- +MultiLineString((2 2,2 8,-2 8),(-6 -6, 6 6),(10 10, 14 14)) +dist buffer buf_area +0 MULTILINESTRING NULL +----- +MultiPolygon(((2 2,2 8,8 8,8 2,2 2)), ((9 9,8 1,1 5,9 9))) +dist buffer buf_area +0 MULTIPOLYGON 66.00 +----- +MultiPolygon(((2 2,2 8,8 8,8 2,2 2), (4 4,4 6,6 6,6 4,4 4)),((9 9,8 1,1 5,9 9))) +dist buffer buf_area +0 MULTIPOLYGON 62.00 +----- +GeometryCollection(Point(0 0)) +dist buffer buf_area +0 GEOMETRYCOLLECTION 0.00 +----- +GeometryCollection(LineString(0 0, 2 2))) +dist buffer buf_area +0 GEOMETRYCOLLECTION 0.00 +----- 
+GeometryCollection(Polygon((2 2,2 8,8 8,8 2,2 2)))) +dist buffer buf_area +0 GEOMETRYCOLLECTION 36.00 +----- +GeometryCollection(MultiPoint(9 9,8 1,1 5)) +dist buffer buf_area +0 GEOMETRYCOLLECTION NULL +----- +GeometryCollection(MultiLineString((0 0,0 1),(3 0,3 1))) +dist buffer buf_area +0 GEOMETRYCOLLECTION NULL +----- +GeometryCollection(MultiPolygon(((0 0, 3 0, 3 3, 0 3, 0 0)),((6 6,6 9,9 9,9 6,6 6)))) +dist buffer buf_area +0 GEOMETRYCOLLECTION 18.00 +----- +GeometryCollection(Point(9 9),LineString(1 5,0 0),Polygon((2 2,2 8,8 8,8 2,2 2))) +dist buffer buf_area +0 GEOMETRYCOLLECTION 36.00 +# +# Testing ST_BUFFER with negative distance +# +----- +POINT(0 0)) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +LineString(0 1, 1 1)) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +LineString(9 9,8 1,1 5,0 0) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +Polygon((2 2,2 8,8 8,8 2,2 2)) +dist buffer buf_area +-1 POLYGON 16.00 +----- +MultiPoint(9 9,8 1,1 5) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +MultiLineString((0 0,2 2)) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +MultiLineString((0 0,2 2,0 4)) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +MultiLineString((0 0,2 2),(0 2,2 0)) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +MultiLineString((2 2,2 8,-2 8),(-6 -6, 6 6),(10 10, 14 14)) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +GeometryCollection(Point(0 0)) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +GeometryCollection(LineString(0 0, 2 2))) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +GeometryCollection(Polygon((2 2,2 8,8 8,8 2,2 2)))) +dist buffer buf_area +-1 POLYGON 16.00 +----- +GeometryCollection(MultiPoint(9 9,8 1,1 5)) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +GeometryCollection(MultiLineString((0 0,0 1),(3 0,3 1))) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +GeometryCollection(Point(9 9),LineString(1 5,0 
0),Polygon((2 2,2 8,8 8,8 2,2 2))) +dist buffer buf_area +-1 POLYGON 16.00 +SELECT ST_CONTAINS( +GeomFromText('MULTIPOLYGON(((0 0, 0 5, 5 5, 5 0, 0 0)),((6 6, 6 11, 11 11, 11 6, 6 6)))'), +GeomFromText('POINT(5 10)')); +ST_CONTAINS( +GeomFromText('MULTIPOLYGON(((0 0, 0 5, 5 5, 5 0, 0 0)),((6 6, 6 11, 11 11, 11 6, 6 6)))'), +GeomFromText('POINT(5 10)')) +0 +SELECT AsText(ST_UNION( +GeomFromText('MULTIPOLYGON(((0 0, 0 5, 5 5, 5 0, 0 0)),((6 6, 6 11, 11 11, 11 6, 6 6)))'), +GeomFromText('POINT(5 10)'))); +AsText(ST_UNION( +GeomFromText('MULTIPOLYGON(((0 0, 0 5, 5 5, 5 0, 0 0)),((6 6, 6 11, 11 11, 11 6, 6 6)))'), +GeomFromText('POINT(5 10)'))) +GEOMETRYCOLLECTION(POLYGON((0 0,0 5,5 5,5 0,0 0)),POLYGON((6 6,6 11,11 11,11 6,6 6)),POINT(5 10)) +DROP PROCEDURE p1; +# +# Bug #13833019 ASSERTION `T1->RESULT_RANGE' FAILED IN GCALC_OPERATION_REDUCER::END_COUPLE +# +SELECT GeometryType(ST_BUFFER(MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((0 0,9 4,3 3,0 0)),((2 2,2 2,8 8,2 3,2 2)))'), 3)); +GeometryType(ST_BUFFER(MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((0 0,9 4,3 3,0 0)),((2 2,2 2,8 8,2 3,2 2)))'), 3)) +POLYGON +# +# Bug #13832749 HANDLE_FATAL_SIGNAL IN GCALC_FUNCTION::COUNT_INTERNAL +# +SELECT GeometryType(ST_BUFFER(MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((3 5,2 5,2 4,3 4,3 5)),((2 2,2 8,8 8,8 2,2 2), (4 4,4 6,6 6,6 4,4 4)), ((9 9,8 1,1 5,9 9)))'),1)); +GeometryType(ST_BUFFER(MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((3 5,2 5,2 4,3 4,3 5)),((2 2,2 8,8 8,8 2,2 2), (4 4,4 6,6 6,6 4,4 4)), ((9 9,8 1,1 5,9 9)))'),1)) +POLYGON +# +# Bug#13358363 - ASSERTION: N > 0 && N < SINUSES_CALCULATED*2+1 | GET_N_SINCOS/ADD_EDGE_BUFFER +# +DO ST_BUFFER(ST_GEOMCOLLFROMTEXT('linestring(1 1,2 2)'),''); +SELECT ST_WITHIN( +LINESTRINGFROMTEXT(' LINESTRING(3 8,9 2,3 8,3 3,7 6,4 7,4 7,8 1) '), +ST_BUFFER(MULTIPOLYGONFROMTEXT(' MULTIPOLYGON(((3 5,2 5,2 4,3 4,3 5)),((2 2,2 8,8 8,8 2,2 2),(4 4,4 6,6 6,6 4,4 4)),((0 5,3 5,3 2,1 2,1 1,3 1,3 0,0 0,0 3,2 3,2 4,0 4,0 5))) '), +ST_NUMINTERIORRINGS(POLYGONFROMTEXT('POLYGON((3 5,2 
4,2 5,3 5)) ')))); +ST_WITHIN( +LINESTRINGFROMTEXT(' LINESTRING(3 8,9 2,3 8,3 3,7 6,4 7,4 7,8 1) '), +ST_BUFFER(MULTIPOLYGONFROMTEXT(' MULTIPOLYGON(((3 5,2 5,2 4,3 4,3 5)),((2 2,2 8,8 8,8 2,2 2),(4 4,4 6,6 6,6 4,4 4)),((0 5,3 5,3 2,1 2,1 1,3 1,3 0,0 0,0 3,2 3,2 4,0 4,0 5))) ') +0 +SELECT ST_DIMENSION(ST_BUFFER(POLYGONFROMTEXT(' POLYGON((3 5,2 5,2 4,3 4,3 5)) '), +ST_NUMINTERIORRINGS(POLYGONFROMTEXT(' POLYGON((0 0,9 3,4 2,0 0))')))); +ST_DIMENSION(ST_BUFFER(POLYGONFROMTEXT(' POLYGON((3 5,2 5,2 4,3 4,3 5)) '), +ST_NUMINTERIORRINGS(POLYGONFROMTEXT(' POLYGON((0 0,9 3,4 2,0 0))')))) +2 +SELECT ST_NUMINTERIORRINGS( +ST_ENVELOPE(ST_BUFFER(MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((3 5,2 5,2 4,3 4,3 5))) '), +SRID(MULTILINESTRINGFROMTEXT('MULTILINESTRING((2 2,4 2,1 2,2 4,2 2)) '))))); +ST_NUMINTERIORRINGS( +ST_ENVELOPE(ST_BUFFER(MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((3 5,2 5,2 4,3 4,3 5))) '), +SRID(MULTILINESTRINGFROMTEXT('MULTILINESTRING((2 2,4 2,1 2,2 4,2 2)) '))))) +0 +SELECT ASTEXT(ST_BUFFER(POLYGONFROMTEXT(' POLYGON((9 9,5 2,4 5,9 9))'), +SRID(GEOMETRYFROMTEXT(' MULTIPOINT(8 4,5 0,7 8,6 9,3 4,7 3,5 5) ')))); +ASTEXT(ST_BUFFER(POLYGONFROMTEXT(' POLYGON((9 9,5 2,4 5,9 9))'), +SRID(GEOMETRYFROMTEXT(' MULTIPOINT(8 4,5 0,7 8,6 9,3 4,7 3,5 5) ')))) +POLYGON((9 9,5 2,4 5,9 9)) diff --git a/mysql-test/r/gis.result b/mysql-test/r/gis.result index 9acdb1a87c2..a1d2ec862b7 100644 --- a/mysql-test/r/gis.result +++ b/mysql-test/r/gis.result @@ -834,6 +834,17 @@ create table t1 (g geometry not null); insert into t1 values(default); ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field drop table t1; +CREATE TABLE t1 (a GEOMETRY); +CREATE VIEW v1 AS SELECT GeomFromwkb(ASBINARY(a)) FROM t1; +CREATE VIEW v2 AS SELECT a FROM t1; +DESCRIBE v1; +Field Type Null Key Default Extra +GeomFromwkb(ASBINARY(a)) geometry YES NULL +DESCRIBE v2; +Field Type Null Key Default Extra +a geometry YES NULL +DROP VIEW v1,v2; +DROP TABLE t1; create table t1 (name VARCHAR(100), square 
GEOMETRY); INSERT INTO t1 VALUES("center", GeomFromText('POLYGON (( 0 0, 0 2, 2 2, 2 0, 0 0))')); INSERT INTO t1 VALUES("small", GeomFromText('POLYGON (( 0 0, 0 1, 1 1, 1 0, 0 0))')); diff --git a/mysql-test/r/group_min_max.result b/mysql-test/r/group_min_max.result index 81cdad8c523..06a8a8a06b8 100644 --- a/mysql-test/r/group_min_max.result +++ b/mysql-test/r/group_min_max.result @@ -3536,7 +3536,7 @@ COUNT(DISTINCT a, b) SUM(DISTINCT a) 0 NULL EXPLAIN SELECT SUM(DISTINCT a), MAX(b) FROM t2 GROUP BY a; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 range NULL a 5 NULL 9 Using index for group-by +1 SIMPLE t2 index NULL a 15 NULL 16 Using index SELECT SUM(DISTINCT a), MAX(b) FROM t2 GROUP BY a; SUM(DISTINCT a) MAX(b) 1 8 @@ -3564,7 +3564,7 @@ SELECT 42 * (a + c + COUNT(DISTINCT c, a, b)) FROM t2 GROUP BY a, b, c; 168 EXPLAIN SELECT (SUM(DISTINCT a) + MAX(b)) FROM t2 GROUP BY a; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 range NULL a 5 NULL 9 Using index for group-by +1 SIMPLE t2 index NULL a 15 NULL 16 Using index SELECT (SUM(DISTINCT a) + MAX(b)) FROM t2 GROUP BY a; (SUM(DISTINCT a) + MAX(b)) 9 @@ -3593,6 +3593,58 @@ id select_type table type possible_keys key key_len ref rows Extra drop table t1; # End of test#50539. # +# Bug#17217128 - BAD INTERACTION BETWEEN MIN/MAX AND +# "HAVING SUM(DISTINCT)": WRONG RESULTS. 
+# +CREATE TABLE t (a INT, b INT, KEY(a,b)); +INSERT INTO t VALUES (1,1), (2,2), (3,3), (4,4), (1,0), (3,2), (4,5); +ANALYZE TABLE t; +Table Op Msg_type Msg_text +test.t analyze status OK +SELECT a, SUM(DISTINCT a), MIN(b) FROM t GROUP BY a; +a SUM(DISTINCT a) MIN(b) +1 1 0 +2 2 2 +3 3 2 +4 4 4 +EXPLAIN SELECT a, SUM(DISTINCT a), MIN(b) FROM t GROUP BY a; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index NULL a 10 NULL 7 Using index +SELECT a, SUM(DISTINCT a), MAX(b) FROM t GROUP BY a; +a SUM(DISTINCT a) MAX(b) +1 1 1 +2 2 2 +3 3 3 +4 4 5 +EXPLAIN SELECT a, SUM(DISTINCT a), MAX(b) FROM t GROUP BY a; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index NULL a 10 NULL 7 Using index +SELECT a, MAX(b) FROM t GROUP BY a HAVING SUM(DISTINCT a); +a MAX(b) +1 1 +2 2 +3 3 +4 5 +EXPLAIN SELECT a, MAX(b) FROM t GROUP BY a HAVING SUM(DISTINCT a); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index NULL a 10 NULL 7 Using index +SELECT SUM(DISTINCT a), MIN(b), MAX(b) FROM t; +SUM(DISTINCT a) MIN(b) MAX(b) +10 0 5 +EXPLAIN SELECT SUM(DISTINCT a), MIN(b), MAX(b) FROM t; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index NULL a 10 NULL 7 Using index +SELECT a, SUM(DISTINCT a), MIN(b), MAX(b) FROM t GROUP BY a; +a SUM(DISTINCT a) MIN(b) MAX(b) +1 1 0 1 +2 2 2 2 +3 3 2 3 +4 4 4 5 +EXPLAIN SELECT a, SUM(DISTINCT a), MIN(b), MAX(b) FROM t GROUP BY a; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index NULL a 10 NULL 7 Using index +DROP TABLE t; +# # MDEV-4219 A simple select query returns random data (upstream bug#68473) # drop table if exists faulty; diff --git a/mysql-test/r/group_min_max_innodb.result b/mysql-test/r/group_min_max_innodb.result index 320c4b2b750..f3511b0ad4a 100644 --- a/mysql-test/r/group_min_max_innodb.result +++ b/mysql-test/r/group_min_max_innodb.result @@ -118,3 +118,171 @@ COUNT(DISTINCT a) 1 
DROP TABLE t1; End of 5.5 tests +# +# Bug#17909656 - WRONG RESULTS FOR A SIMPLE QUERY WITH GROUP BY +# +CREATE TABLE t0 ( +i1 INTEGER NOT NULL +); +INSERT INTO t0 VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10), +(11),(12),(13),(14),(15),(16),(17),(18),(19),(20), +(21),(22),(23),(24),(25),(26),(27),(28),(29),(30); +CREATE TABLE t1 ( +c1 CHAR(1) NOT NULL, +i1 INTEGER NOT NULL, +i2 INTEGER NOT NULL, +UNIQUE KEY k1 (c1,i2) +) ENGINE=InnoDB; +INSERT INTO t1 SELECT 'A',i1,i1 FROM t0; +INSERT INTO t1 SELECT 'B',i1,i1 FROM t0; +INSERT INTO t1 SELECT 'C',i1,i1 FROM t0; +INSERT INTO t1 SELECT 'D',i1,i1 FROM t0; +INSERT INTO t1 SELECT 'E',i1,i1 FROM t0; +INSERT INTO t1 SELECT 'F',i1,i1 FROM t0; +CREATE TABLE t2 ( +c1 CHAR(1) NOT NULL, +i1 INTEGER NOT NULL, +i2 INTEGER NOT NULL, +UNIQUE KEY k2 (c1,i1,i2) +) ENGINE=InnoDB; +INSERT INTO t2 SELECT 'A',i1,i1 FROM t0; +INSERT INTO t2 SELECT 'B',i1,i1 FROM t0; +INSERT INTO t2 SELECT 'C',i1,i1 FROM t0; +INSERT INTO t2 SELECT 'D',i1,i1 FROM t0; +INSERT INTO t2 SELECT 'E',i1,i1 FROM t0; +INSERT INTO t2 SELECT 'F',i1,i1 FROM t0; +ANALYZE TABLE t1; +ANALYZE TABLE t2; +EXPLAIN SELECT c1, max(i2) FROM t1 WHERE (c1 = 'C' AND i2 = 17) OR ( c1 = 'F') +GROUP BY c1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range k1 k1 5 NULL 31 Using where; Using index +SELECT c1, max(i2) FROM t1 WHERE (c1 = 'C' AND i2 = 17) OR ( c1 = 'F') +GROUP BY c1; +c1 max(i2) +C 17 +F 30 +EXPLAIN SELECT c1, max(i2) FROM t1 WHERE (c1 = 'C' OR ( c1 = 'F' AND i2 = 17)) +GROUP BY c1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range k1 k1 5 NULL 31 Using where; Using index +SELECT c1, max(i2) FROM t1 WHERE (c1 = 'C' OR ( c1 = 'F' AND i2 = 17)) +GROUP BY c1; +c1 max(i2) +C 30 +F 17 +EXPLAIN SELECT c1, max(i2) FROM t1 WHERE (c1 = 'C' OR c1 = 'F' ) AND ( i2 = 17 ) +GROUP BY c1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range k1 k1 5 NULL 2 Using where; Using index 
+SELECT c1, max(i2) FROM t1 WHERE (c1 = 'C' OR c1 = 'F' ) AND ( i2 = 17 ) +GROUP BY c1; +c1 max(i2) +C 17 +F 17 +EXPLAIN SELECT c1, max(i2) FROM t1 +WHERE ((c1 = 'C' AND (i2 = 40 OR i2 = 30)) OR ( c1 = 'F' AND (i2 = 40 ))) +GROUP BY c1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range k1 k1 5 NULL 3 Using where; Using index +SELECT c1, max(i2) FROM t1 +WHERE ((c1 = 'C' AND (i2 = 40 OR i2 = 30)) OR ( c1 = 'F' AND (i2 = 40 ))) +GROUP BY c1; +c1 max(i2) +C 30 +EXPLAIN SELECT c1, i1, max(i2) FROM t2 +WHERE (c1 = 'C' OR ( c1 = 'F' AND i1 < 35)) AND ( i2 = 17 ) +GROUP BY c1,i1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range k2 k2 5 NULL 59 Using where; Using index +SELECT c1, i1, max(i2) FROM t2 +WHERE (c1 = 'C' OR ( c1 = 'F' AND i1 < 35)) AND ( i2 = 17 ) +GROUP BY c1,i1; +c1 i1 max(i2) +C 17 17 +F 17 17 +EXPLAIN SELECT c1, i1, max(i2) FROM t2 +WHERE (((c1 = 'C' AND i1 < 40) OR ( c1 = 'F' AND i1 < 35)) AND ( i2 = 17 )) +GROUP BY c1,i1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range k2 k2 5 NULL 58 Using where; Using index +SELECT c1, i1, max(i2) FROM t2 +WHERE (((c1 = 'C' AND i1 < 40) OR ( c1 = 'F' AND i1 < 35)) AND ( i2 = 17 )) +GROUP BY c1,i1; +c1 i1 max(i2) +C 17 17 +F 17 17 +EXPLAIN SELECT c1, i1, max(i2) FROM t2 +WHERE ((c1 = 'C' AND i1 < 40) OR ( c1 = 'F' AND i1 < 35) OR ( i2 = 17 )) +GROUP BY c1,i1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 index k2 k2 9 NULL 180 Using where; Using index +SELECT c1, i1, max(i2) FROM t2 +WHERE ((c1 = 'C' AND i1 < 40) OR ( c1 = 'F' AND i1 < 35) OR ( i2 = 17 )) +GROUP BY c1,i1; +c1 i1 max(i2) +A 17 17 +B 17 17 +C 1 1 +C 2 2 +C 3 3 +C 4 4 +C 5 5 +C 6 6 +C 7 7 +C 8 8 +C 9 9 +C 10 10 +C 11 11 +C 12 12 +C 13 13 +C 14 14 +C 15 15 +C 16 16 +C 17 17 +C 18 18 +C 19 19 +C 20 20 +C 21 21 +C 22 22 +C 23 23 +C 24 24 +C 25 25 +C 26 26 +C 27 27 +C 28 28 +C 29 29 +C 30 30 +D 17 17 +E 17 17 +F 1 
1 +F 2 2 +F 3 3 +F 4 4 +F 5 5 +F 6 6 +F 7 7 +F 8 8 +F 9 9 +F 10 10 +F 11 11 +F 12 12 +F 13 13 +F 14 14 +F 15 15 +F 16 16 +F 17 17 +F 18 18 +F 19 19 +F 20 20 +F 21 21 +F 22 22 +F 23 23 +F 24 24 +F 25 25 +F 26 26 +F 27 27 +F 28 28 +F 29 29 +F 30 30 +DROP TABLE t0,t1,t2; diff --git a/mysql-test/r/have_mysql_upgrade.result b/mysql-test/r/have_mysql_upgrade.result deleted file mode 100644 index 952bea420f9..00000000000 --- a/mysql-test/r/have_mysql_upgrade.result +++ /dev/null @@ -1,2 +0,0 @@ -have_mysql_upgrade -1 diff --git a/mysql-test/r/huge_frm-6224.result b/mysql-test/r/huge_frm-6224.result new file mode 100644 index 00000000000..3772317c04d --- /dev/null +++ b/mysql-test/r/huge_frm-6224.result @@ -0,0 +1 @@ +ERROR HY000: The definition for table `t1` is too big diff --git a/mysql-test/r/innodb_load_xa.result b/mysql-test/r/innodb_load_xa.result new file mode 100644 index 00000000000..85e6d52c098 --- /dev/null +++ b/mysql-test/r/innodb_load_xa.result @@ -0,0 +1,21 @@ +install plugin innodb soname 'ha_innodb'; +Warnings: +Warning 1105 Cannot enable tc-log at run-time. 
XA features of InnoDB are disabled +select engine,support,transactions,xa from information_schema.engines where engine='innodb'; +engine support transactions xa +InnoDB YES YES NO +create table t1 (a int) engine=innodb; +start transaction; +insert t1 values (1); +insert t1 values (2); +commit; +include/show_binlog_events.inc +Log_name Pos Event_type Server_id End_log_pos Info +mysqld-bin.000001 # Gtid # # GTID #-#-# +mysqld-bin.000001 # Query # # use `test`; create table t1 (a int) engine=innodb +mysqld-bin.000001 # Gtid # # BEGIN GTID #-#-# +mysqld-bin.000001 # Query # # use `test`; insert t1 values (1) +mysqld-bin.000001 # Query # # use `test`; insert t1 values (2) +mysqld-bin.000001 # Query # # COMMIT +drop table t1; +uninstall plugin innodb; diff --git a/mysql-test/r/innodb_mysql_lock2.result b/mysql-test/r/innodb_mysql_lock2.result index 17dd747de6f..54203c140a2 100644 --- a/mysql-test/r/innodb_mysql_lock2.result +++ b/mysql-test/r/innodb_mysql_lock2.result @@ -331,13 +331,14 @@ Success: 'update v2 set j= j-10 where j = 3' takes shared row locks on 't1'. # 4.1 SELECT/SET with a stored function which does not # modify data and uses SELECT in its turn. # -# In theory there is no need to take row locks on the table +# There is no need to take row locks on the table # being selected from in SF as the call to such function -# won't get into the binary log. In practice, however, we -# discover that fact too late in the process to be able to -# affect the decision what locks should be taken. -# Hence, strong locks are taken in this case. -Success: 'select f1()' takes shared row locks on 't1'. +# won't get into the binary log. +# +# However in practice innodb takes strong lock on tables +# being selected from within SF, when SF is called from +# non SELECT statements like 'set' statement below. +Success: 'select f1()' doesn't take row locks on 't1'. Success: 'set @a:= f1()' takes shared row locks on 't1'. 
# # 4.2 INSERT (or other statement which modifies data) with @@ -364,13 +365,15 @@ Success: 'set @a:= f2()' takes shared row locks on 't1'. # modify data and reads a table through subselect # in a control construct. # -# Again, in theory a call to this function won't get to the -# binary log and thus no locking is needed. But in practice -# we don't detect this fact early enough (get_lock_type_for_table()) -# to avoid taking row locks. -Success: 'select f3()' takes shared row locks on 't1'. +# Call to this function won't get to the +# binary log and thus no locking is needed. +# +# However in practice innodb takes strong lock on tables +# being selected from within SF, when SF is called from +# non SELECT statements like 'set' statement below. +Success: 'select f3()' doesn't take row locks on 't1'. Success: 'set @a:= f3()' takes shared row locks on 't1'. -Success: 'select f4()' takes shared row locks on 't1'. +Success: 'select f4()' doesn't take row locks on 't1'. Success: 'set @a:= f4()' takes shared row locks on 't1'. # # 4.5. INSERT (or other statement which modifies data) with @@ -398,13 +401,15 @@ Success: 'set @a:= f5()' takes shared row locks on 't1'. # doesn't modify data and reads tables through # a view. # -# Once again, in theory, calls to such functions won't -# get into the binary log and thus don't need row -# locks. But in practice this fact is discovered -# too late to have any effect. -Success: 'select f6()' takes shared row locks on 't1'. +# Calls to such functions won't get into +# the binary log and thus don't need row locks. +# +# However in practice innodb takes strong lock on tables +# being selected from within SF, when SF is called from +# non SELECT statements like 'set' statement below. +Success: 'select f6()' doesn't take row locks on 't1'. Success: 'set @a:= f6()' takes shared row locks on 't1'. -Success: 'select f7()' takes shared row locks on 't1'. +Success: 'select f7()' doesn't take row locks on 't1'. 
Success: 'set @a:= f7()' takes shared row locks on 't1'. # # 4.8 INSERT which uses stored function which @@ -431,10 +436,9 @@ Success: 'select f9()' takes shared row locks on 't1'. # data and reads a table indirectly, by calling another # function. # -# In theory, calls to such functions won't get into the binary -# log and thus don't need to acquire row locks. But in practice -# this fact is discovered too late to have any effect. -Success: 'select f10()' takes shared row locks on 't1'. +# Calls to such functions won't get into the binary +# log and thus don't need to acquire row locks. +Success: 'select f10()' doesn't take row locks on 't1'. # # 4.11 INSERT which uses a stored function which doesn't modify # data and reads a table indirectly, by calling another @@ -494,10 +498,9 @@ Success: 'select f14()' takes shared row locks on 't1'. # 5.3 SELECT that calls a function that doesn't modify data and # uses a CALL statement that reads a table via SELECT. # -# In theory, calls to such functions won't get into the binary -# log and thus don't need to acquire row locks. But in practice -# this fact is discovered too late to have any effect. -Success: 'select f15()' takes shared row locks on 't1'. +# Calls to such functions won't get into the binary +# log and thus don't need to acquire row locks. +Success: 'select f15()' doesn't take row locks on 't1'. # # 5.4 INSERT which calls function which doesn't modify data and # uses CALL statement which reads table through SELECT. 
diff --git a/mysql-test/r/innodb_mysql_sync.result b/mysql-test/r/innodb_mysql_sync.result index 21e9cd04c22..49d69d13e40 100644 --- a/mysql-test/r/innodb_mysql_sync.result +++ b/mysql-test/r/innodb_mysql_sync.result @@ -86,7 +86,10 @@ SET DEBUG_SYNC= 'now SIGNAL killed'; # Reaping: OPTIMIZE TABLE t1 Table Op Msg_type Msg_text test.t1 optimize note Table does not support optimize, doing recreate + analyze instead +test.t1 optimize error Query execution was interrupted test.t1 optimize status Operation failed +Warnings: +Error 1317 Query execution was interrupted # Connection default DROP TABLE t1; SET DEBUG_SYNC= 'RESET'; diff --git a/mysql-test/r/ipv4_and_ipv6.result b/mysql-test/r/ipv4_and_ipv6.result new file mode 100644 index 00000000000..f15aeba65fa --- /dev/null +++ b/mysql-test/r/ipv4_and_ipv6.result @@ -0,0 +1,58 @@ +=============Test of '::1' ======================================== +mysqld is alive +CREATE USER testuser@'::1' identified by '1234'; +GRANT ALL ON test.* TO testuser@'::1'; +SHOW GRANTS FOR testuser@'::1'; +Grants for testuser@::1 +GRANT USAGE ON *.* TO 'testuser'@'::1' IDENTIFIED BY PASSWORD '*A4B6157319038724E3560894F7F932C8886EBFCF' +GRANT ALL PRIVILEGES ON `test`.* TO 'testuser'@'::1' +SET @nip= inet_aton('::1'); +SELECT @nip; +@nip +NULL +SELECT inet_ntoa(@nip); +inet_ntoa(@nip) +NULL +SELECT USER(); +USER() +root@localhost +SELECT current_user(); +current_user() +root@localhost +SHOW PROCESSLIST; +REVOKE ALL ON test.* FROM testuser@'::1'; +RENAME USER testuser@'::1' to testuser1@'::1'; +SET PASSWORD FOR testuser1@'::1' = PASSWORD ('9876'); +SELECT USER(); +USER() +root@localhost +DROP USER testuser1@'::1'; +=============Test of '127.0.0.1' (IPv4) =========================== +mysqld is alive +CREATE USER testuser@'127.0.0.1' identified by '1234'; +GRANT ALL ON test.* TO testuser@'127.0.0.1'; +SHOW GRANTS FOR testuser@'127.0.0.1'; +Grants for testuser@127.0.0.1 +GRANT USAGE ON *.* TO 'testuser'@'127.0.0.1' IDENTIFIED BY PASSWORD 
'*A4B6157319038724E3560894F7F932C8886EBFCF' +GRANT ALL PRIVILEGES ON `test`.* TO 'testuser'@'127.0.0.1' +SET @nip= inet_aton('127.0.0.1'); +SELECT @nip; +@nip +2130706433 +SELECT inet_ntoa(@nip); +inet_ntoa(@nip) +127.0.0.1 +SELECT USER(); +USER() +root@localhost +SELECT current_user(); +current_user() +root@localhost +SHOW PROCESSLIST; +REVOKE ALL ON test.* FROM testuser@'127.0.0.1'; +RENAME USER testuser@'127.0.0.1' to testuser1@'127.0.0.1'; +SET PASSWORD FOR testuser1@'127.0.0.1' = PASSWORD ('9876'); +SELECT USER(); +USER() +root@localhost +DROP USER testuser1@'127.0.0.1'; diff --git a/mysql-test/r/key_cache.result b/mysql-test/r/key_cache.result index fad980c810c..8634beb290f 100644 --- a/mysql-test/r/key_cache.result +++ b/mysql-test/r/key_cache.result @@ -2,6 +2,7 @@ drop table if exists t1, t2, t3; SET @save_key_buffer_size=@@key_buffer_size; SET @save_key_cache_block_size=@@key_cache_block_size; SET @save_key_cache_segments=@@key_cache_segments; +SET @save_key_cache_file_hash_size=@@key_cache_file_hash_size; SELECT @@key_buffer_size, @@small.key_buffer_size; @@key_buffer_size @@small.key_buffer_size 2097152 131072 @@ -84,15 +85,15 @@ select @@key_buffer_size; select @@key_cache_block_size; @@key_cache_block_size 1024 +select @@key_cache_file_hash_size; +@@key_cache_file_hash_size +512 set global keycache1.key_buffer_size=1024*1024; create table t1 (p int primary key, a char(10)) delay_key_write=1; create table t2 (p int primary key, i int, a char(10), key k1(i), key k2(a)); -show status like 'key_blocks_used'; -Variable_name Value -Key_blocks_used 0 -show status like 'key_blocks_unused'; -Variable_name Value -Key_blocks_unused KEY_BLOCKS_UNUSED +select @org_key_blocks_unused-unused_blocks as key_blocks_unused, used_blocks as key_blocks_used from information_schema.key_caches where key_cache_name="default"; +key_blocks_unused key_blocks_used +0 0 insert into t1 values (1, 'qqqq'), (11, 'yyyy'); insert into t2 values (1, 1, 'qqqq'), (2, 1, 'pppp'), (3, 1, 
'yyyy'), (4, 3, 'zzzz'); @@ -108,12 +109,9 @@ p i a 4 3 zzzz update t1 set p=2 where p=1; update t2 set i=2 where i=1; -show status like 'key_blocks_used'; -Variable_name Value -Key_blocks_used 4 -show status like 'key_blocks_unused'; -Variable_name Value -Key_blocks_unused KEY_BLOCKS_UNUSED +select @org_key_blocks_unused-unused_blocks as key_blocks_unused, used_blocks as key_blocks_used from information_schema.key_caches where key_cache_name="default"; +key_blocks_unused key_blocks_used +4 4 cache index t1 key (`primary`) in keycache1; Table Op Msg_type Msg_text test.t1 assign_to_keycache status OK @@ -270,12 +268,9 @@ Table Op Msg_type Msg_text test.t1 assign_to_keycache status OK test.t2 assign_to_keycache status OK drop table t1,t2,t3; -show status like 'key_blocks_used'; -Variable_name Value -Key_blocks_used 4 -show status like 'key_blocks_unused'; -Variable_name Value -Key_blocks_unused KEY_BLOCKS_UNUSED +select @org_key_blocks_unused-unused_blocks as key_blocks_unused, used_blocks as key_blocks_used from information_schema.key_caches where key_cache_name="default"; +key_blocks_unused key_blocks_used +0 4 create table t1 (a int primary key); cache index t1 in keycache2; Table Op Msg_type Msg_text @@ -558,6 +553,7 @@ KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_B default 1 NULL 2097152 1024 4 # 0 0 0 0 0 small NULL NULL 1048576 1024 1 # 0 0 0 0 0 set global key_buffer_size=32*1024; +set global key_cache_file_hash_size=128; select @@key_buffer_size; @@key_buffer_size 32768 @@ -833,3 +829,4 @@ set global keycache1.key_buffer_size=0; set global keycache2.key_buffer_size=0; set global key_buffer_size=@save_key_buffer_size; set global key_cache_segments=@save_key_cache_segments; +set global key_cache_file_hash_size=@save_key_cache_file_hash_size; diff --git a/mysql-test/r/lock_sync.result b/mysql-test/r/lock_sync.result index 8fe94679e70..219cc08342e 100644 --- a/mysql-test/r/lock_sync.result +++ b/mysql-test/r/lock_sync.result @@ 
-27,6 +27,7 @@ drop table if exists t0, t1, t2, t3, t4, t5; drop view if exists v1, v2; drop procedure if exists p1; drop procedure if exists p2; +drop procedure if exists p3; drop function if exists f1; drop function if exists f2; drop function if exists f3; @@ -42,6 +43,8 @@ drop function if exists f12; drop function if exists f13; drop function if exists f14; drop function if exists f15; +drop function if exists f16; +drop function if exists f17; create table t1 (i int primary key); insert into t1 values (1), (2), (3), (4), (5); create table t2 (j int primary key); @@ -146,6 +149,26 @@ declare k int; call p2(k); return k; end| +create function f16() returns int +begin +create temporary table if not exists temp1 (a int); +insert into temp1 select * from t1; +drop temporary table temp1; +return 1; +end| +create function f17() returns int +begin +declare j int; +select i from t1 where i = 1 into j; +call p3; +return 1; +end| +create procedure p3() +begin +create temporary table if not exists temp1 (a int); +insert into temp1 select * from t1; +drop temporary table temp1; +end| create trigger t4_bi before insert on t4 for each row begin declare k int; @@ -185,6 +208,7 @@ end| # once during its execution. show create procedure p1; show create procedure p2; +show create procedure p3; show create function f1; show create function f2; show create function f3; @@ -200,6 +224,8 @@ show create function f12; show create function f13; show create function f14; show create function f15; +show create function f16; +show create function f17; # Switch back to connection 'default'. # # 1. Statements that read tables and do not use subqueries. @@ -359,14 +385,11 @@ Success: 'update v2 set j= j-10 where j = 3' doesn't allow concurrent inserts in # 4.1 SELECT/SET with a stored function which does not # modify data and uses SELECT in its turn. 
# -# In theory there is no need to take strong locks on the table +# There is no need to take strong locks on the table # being selected from in SF as the call to such function -# won't get into the binary log. In practice, however, we -# discover that fact too late in the process to be able to -# affect the decision what locks should be taken. -# Hence, strong locks are taken in this case. -Success: 'select f1()' doesn't allow concurrent inserts into 't1'. -Success: 'set @a:= f1()' doesn't allow concurrent inserts into 't1'. +# won't get into the binary log. +Success: 'select f1()' allows concurrent inserts into 't1'. +Success: 'set @a:= f1()' allows concurrent inserts into 't1'. # # 4.2 INSERT (or other statement which modifies data) with # a stored function which does not modify data and uses @@ -392,14 +415,12 @@ Success: 'set @a:= f2()' doesn't allow concurrent inserts into 't1'. # modify data and reads a table through subselect # in a control construct. # -# Again, in theory a call to this function won't get to the -# binary log and thus no strong lock is needed. But in practice -# we don't detect this fact early enough (get_lock_type_for_table()) -# to avoid taking a strong lock. -Success: 'select f3()' doesn't allow concurrent inserts into 't1'. -Success: 'set @a:= f3()' doesn't allow concurrent inserts into 't1'. -Success: 'select f4()' doesn't allow concurrent inserts into 't1'. -Success: 'set @a:= f4()' doesn't allow concurrent inserts into 't1'. +# Call to this function won't get to the +# binary log and thus no strong lock is needed. +Success: 'select f3()' allows concurrent inserts into 't1'. +Success: 'set @a:= f3()' allows concurrent inserts into 't1'. +Success: 'select f4()' allows concurrent inserts into 't1'. +Success: 'set @a:= f4()' allows concurrent inserts into 't1'. # # 4.5. 
INSERT (or other statement which modifies data) with # a stored function which does not modify data and reads @@ -426,14 +447,13 @@ Success: 'set @a:= f5()' doesn't allow concurrent inserts into 't1'. # doesn't modify data and reads tables through # a view. # -# Once again, in theory, calls to such functions won't -# get into the binary log and thus don't need strong -# locks. But in practice this fact is discovered -# too late to have any effect. -Success: 'select f6()' doesn't allow concurrent inserts into 't1'. -Success: 'set @a:= f6()' doesn't allow concurrent inserts into 't1'. -Success: 'select f7()' doesn't allow concurrent inserts into 't1'. -Success: 'set @a:= f7()' doesn't allow concurrent inserts into 't1'. +# Calls to such functions won't get into +# the binary log and thus don't need strong +# locks. +Success: 'select f6()' allows concurrent inserts into 't1'. +Success: 'set @a:= f6()' allows concurrent inserts into 't1'. +Success: 'select f7()' allows concurrent inserts into 't1'. +Success: 'set @a:= f7()' allows concurrent inserts into 't1'. # # 4.8 INSERT which uses stored function which # doesn't modify data and reads a table @@ -459,10 +479,9 @@ Success: 'select f9()' doesn't allow concurrent inserts into 't1'. # data and reads a table indirectly, by calling another # function. # -# In theory, calls to such functions won't get into the binary -# log and thus don't need to acquire strong locks. But in practice -# this fact is discovered too late to have any effect. -Success: 'select f10()' doesn't allow concurrent inserts into 't1'. +# Calls to such functions won't get into the binary +# log and thus don't need to acquire strong locks. +Success: 'select f10()' allows concurrent inserts into 't1'. # # 4.11 INSERT which uses a stored function which doesn't modify # data and reads a table indirectly, by calling another @@ -501,6 +520,26 @@ Success: 'select f12((select i+10 from t1 where i=1))' allows concurrent inserts # uses. 
Therefore it should take strong locks on the data it reads. Success: 'insert into t2 values (f13((select i+10 from t1 where i=1)))' doesn't allow concurrent inserts into 't1'. # +# 4.15 SELECT/SET with a stored function which +# inserts data into a temporary table using +# SELECT on t1. +# +# Since this statement is written to the binary log it should +# be serialized with concurrent statements affecting the data it +# uses. Therefore it should take strong locks on the data it reads. +Success: 'select f16()' doesn't allow concurrent inserts into 't1'. +Success: 'set @a:= f16()' doesn't allow concurrent inserts into 't1'. +# +# 4.16 SELECT/SET with a stored function which call procedure +# which inserts data into a temporary table using +# SELECT on t1. +# +# Since this statement is written to the binary log it should +# be serialized with concurrent statements affecting the data it +# uses. Therefore it should take strong locks on the data it reads. +Success: 'select f17()' doesn't allow concurrent inserts into 't1'. +Success: 'set @a:= f17()' doesn't allow concurrent inserts into 't1'. +# # 5. Statements that read tables through stored procedures. # # @@ -522,10 +561,9 @@ Success: 'select f14()' doesn't allow concurrent inserts into 't1'. # 5.3 SELECT that calls a function that doesn't modify data and # uses a CALL statement that reads a table via SELECT. # -# In theory, calls to such functions won't get into the binary -# log and thus don't need to acquire strong locks. But in practice -# this fact is discovered too late to have any effect. -Success: 'select f15()' doesn't allow concurrent inserts into 't1'. +# Calls to such functions won't get into the binary +# log and thus don't need to acquire strong locks. +Success: 'select f15()' allows concurrent inserts into 't1'. # # 5.4 INSERT which calls function which doesn't modify data and # uses CALL statement which reads table through SELECT. 
@@ -585,9 +623,12 @@ drop function f12; drop function f13; drop function f14; drop function f15; +drop function f16; +drop function f17; drop view v1, v2; drop procedure p1; drop procedure p2; +drop procedure p3; drop table t1, t2, t3, t4, t5; set @@global.concurrent_insert= @old_concurrent_insert; # diff --git a/mysql-test/r/log_tables_upgrade.result b/mysql-test/r/log_tables_upgrade.result index 6f7bd64eba3..d25fc018cdf 100644 --- a/mysql-test/r/log_tables_upgrade.result +++ b/mysql-test/r/log_tables_upgrade.result @@ -11,13 +11,8 @@ Table Op Msg_type Msg_text test.bug49823 repair status OK RENAME TABLE general_log TO renamed_general_log; RENAME TABLE test.bug49823 TO general_log; -Phase 1/3: Fixing table and database names -Phase 2/3: Checking and upgrading tables +Phase 1/3: Running 'mysql_fix_privilege_tables'... Processing databases -information_schema -mtr -mtr.global_suppressions OK -mtr.test_suppressions OK mysql mysql.column_stats OK mysql.columns_priv OK @@ -48,9 +43,15 @@ mysql.time_zone_name OK mysql.time_zone_transition OK mysql.time_zone_transition_type OK mysql.user OK +Phase 2/3: Fixing table and database names +Phase 3/3: Checking and upgrading tables +Processing databases +information_schema +mtr +mtr.global_suppressions OK +mtr.test_suppressions OK performance_schema test -Phase 3/3: Running 'mysql_fix_privilege_tables'... 
OK DROP TABLE general_log; RENAME TABLE renamed_general_log TO general_log; diff --git a/mysql-test/r/myisam_explain_non_select_all.result b/mysql-test/r/myisam_explain_non_select_all.result index 285a1ca6786..3dfa56c38d6 100644 --- a/mysql-test/r/myisam_explain_non_select_all.result +++ b/mysql-test/r/myisam_explain_non_select_all.result @@ -755,6 +755,7 @@ Variable_name Value # Status of "equivalent" SELECT query execution: Variable_name Value Handler_read_rnd_next 11 +Sort_priority_queue_sorts 1 Sort_rows 1 Sort_scan 1 # Status of testing query execution: @@ -1231,6 +1232,7 @@ Variable_name Value # Status of "equivalent" SELECT query execution: Variable_name Value Handler_read_rnd_next 27 +Sort_priority_queue_sorts 1 Sort_rows 5 Sort_scan 1 # Status of testing query execution: @@ -1275,6 +1277,7 @@ Variable_name Value # Status of "equivalent" SELECT query execution: Variable_name Value Handler_read_rnd_next 27 +Sort_priority_queue_sorts 1 Sort_rows 1 Sort_scan 1 # Status of testing query execution: @@ -1361,6 +1364,7 @@ Variable_name Value # Status of "equivalent" SELECT query execution: Variable_name Value Handler_read_rnd_next 27 +Sort_priority_queue_sorts 1 Sort_rows 1 Sort_scan 1 # Status of testing query execution: @@ -1407,6 +1411,7 @@ Variable_name Value Variable_name Value Handler_read_rnd 1 Handler_read_rnd_next 27 +Sort_priority_queue_sorts 1 Sort_rows 1 Sort_scan 1 # Status of testing query execution: @@ -1540,6 +1545,7 @@ Variable_name Value # Status of "equivalent" SELECT query execution: Variable_name Value Handler_read_rnd_next 27 +Sort_priority_queue_sorts 1 Sort_rows 5 Sort_scan 1 # Status of testing query execution: @@ -1668,6 +1674,7 @@ Variable_name Value # Status of "equivalent" SELECT query execution: Variable_name Value Handler_read_rnd_next 27 +Sort_priority_queue_sorts 1 Sort_rows 5 Sort_scan 1 # Status of testing query execution: @@ -1675,6 +1682,7 @@ Variable_name Value Handler_read_rnd 5 Handler_read_rnd_next 27 Handler_update 5 
+Sort_priority_queue_sorts 1 Sort_rows 5 Sort_scan 1 @@ -1712,6 +1720,7 @@ Variable_name Value # Status of "equivalent" SELECT query execution: Variable_name Value Handler_read_rnd_next 27 +Sort_priority_queue_sorts 1 Sort_rows 1 Sort_scan 1 # Status of testing query execution: @@ -1719,6 +1728,7 @@ Variable_name Value Handler_read_rnd 1 Handler_read_rnd_next 27 Handler_update 1 +Sort_priority_queue_sorts 1 Sort_rows 1 Sort_scan 1 @@ -1799,12 +1809,14 @@ Variable_name Value # Status of "equivalent" SELECT query execution: Variable_name Value Handler_read_rnd_next 27 +Sort_priority_queue_sorts 1 Sort_rows 1 Sort_scan 1 # Status of testing query execution: Variable_name Value Handler_read_rnd 1 Handler_read_rnd_next 27 +Sort_priority_queue_sorts 1 Sort_rows 1 Sort_scan 1 @@ -1844,12 +1856,14 @@ Variable_name Value Variable_name Value Handler_read_rnd 1 Handler_read_rnd_next 27 +Sort_priority_queue_sorts 1 Sort_rows 1 Sort_scan 1 # Status of testing query execution: Variable_name Value Handler_read_rnd 1 Handler_read_rnd_next 27 +Sort_priority_queue_sorts 1 Sort_rows 1 Sort_scan 1 @@ -1977,6 +1991,7 @@ Variable_name Value # Status of "equivalent" SELECT query execution: Variable_name Value Handler_read_rnd_next 27 +Sort_priority_queue_sorts 1 Sort_rows 5 Sort_scan 1 # Status of testing query execution: @@ -1984,6 +1999,7 @@ Variable_name Value Handler_read_rnd 5 Handler_read_rnd_next 27 Handler_update 4 +Sort_priority_queue_sorts 1 Sort_rows 5 Sort_scan 1 @@ -2067,6 +2083,7 @@ Variable_name Value Variable_name Value Handler_read_key 1 Handler_read_next 2 +Sort_priority_queue_sorts 1 Sort_range 1 Sort_rows 2 # Status of testing query execution: @@ -2075,6 +2092,7 @@ Handler_read_key 1 Handler_read_next 2 Handler_read_rnd 2 Handler_update 2 +Sort_priority_queue_sorts 1 Sort_range 1 Sort_rows 2 @@ -2105,6 +2123,7 @@ Variable_name Value Variable_name Value Handler_read_key 1 Handler_read_next 2 +Sort_priority_queue_sorts 1 Sort_range 1 Sort_rows 2 # Status of testing 
query execution: @@ -2625,6 +2644,7 @@ Variable_name Value Variable_name Value Handler_read_key 3 Handler_read_rnd_next 10 +Sort_priority_queue_sorts 1 Sort_rows 3 Sort_scan 1 # Status of testing query execution: @@ -2632,6 +2652,7 @@ Variable_name Value Handler_read_key 3 Handler_read_rnd_next 8 Handler_update 1 +Sort_priority_queue_sorts 1 Sort_rows 3 Sort_scan 1 @@ -2674,12 +2695,14 @@ Variable_name Value Variable_name Value Handler_read_key 3 Handler_read_rnd_next 10 +Sort_priority_queue_sorts 1 Sort_rows 3 Sort_scan 1 # Status of testing query execution: Variable_name Value Handler_read_key 3 Handler_read_rnd_next 10 +Sort_priority_queue_sorts 1 Sort_rows 3 Sort_scan 1 @@ -2724,12 +2747,14 @@ Variable_name Value Variable_name Value Handler_read_key 3 Handler_read_rnd_next 10 +Sort_priority_queue_sorts 1 Sort_rows 3 Sort_scan 1 # Status of testing query execution: Variable_name Value Handler_read_key 3 Handler_read_rnd_next 10 +Sort_priority_queue_sorts 1 Sort_rows 3 Sort_scan 1 diff --git a/mysql-test/r/mysql_client_test_comp.result b/mysql-test/r/mysql_client_test_comp.result new file mode 100644 index 00000000000..b9cac467b92 --- /dev/null +++ b/mysql-test/r/mysql_client_test_comp.result @@ -0,0 +1,4 @@ +SET @old_slow_query_log= @@global.slow_query_log; +call mtr.add_suppression(" Error reading file './client_test_db/test_frm_bug.frm'"); +ok +SET @@global.slow_query_log= @old_slow_query_log; diff --git a/mysql-test/r/mysql_upgrade.result b/mysql-test/r/mysql_upgrade.result index 08eafcc1d5e..40c979b8138 100644 --- a/mysql-test/r/mysql_upgrade.result +++ b/mysql-test/r/mysql_upgrade.result @@ -1,11 +1,6 @@ Run mysql_upgrade once -Phase 1/3: Fixing table and database names -Phase 2/3: Checking and upgrading tables +Phase 1/3: Running 'mysql_fix_privilege_tables'... 
Processing databases -information_schema -mtr -mtr.global_suppressions OK -mtr.test_suppressions OK mysql mysql.column_stats OK mysql.columns_priv OK @@ -35,20 +30,21 @@ mysql.time_zone_name OK mysql.time_zone_transition OK mysql.time_zone_transition_type OK mysql.user OK +Phase 2/3: Fixing table and database names +Phase 3/3: Checking and upgrading tables +Processing databases +information_schema +mtr +mtr.global_suppressions OK +mtr.test_suppressions OK performance_schema test -Phase 3/3: Running 'mysql_fix_privilege_tables'... OK Run it again - should say already completed This installation of MySQL is already upgraded to VERSION, use --force if you still need to run mysql_upgrade Force should run it regardless of wether it's been run before -Phase 1/3: Fixing table and database names -Phase 2/3: Checking and upgrading tables +Phase 1/3: Running 'mysql_fix_privilege_tables'... Processing databases -information_schema -mtr -mtr.global_suppressions OK -mtr.test_suppressions OK mysql mysql.column_stats OK mysql.columns_priv OK @@ -78,20 +74,21 @@ mysql.time_zone_name OK mysql.time_zone_transition OK mysql.time_zone_transition_type OK mysql.user OK +Phase 2/3: Fixing table and database names +Phase 3/3: Checking and upgrading tables +Processing databases +information_schema +mtr +mtr.global_suppressions OK +mtr.test_suppressions OK performance_schema test -Phase 3/3: Running 'mysql_fix_privilege_tables'... OK CREATE USER mysqltest1@'%' IDENTIFIED by 'sakila'; GRANT ALL ON *.* TO mysqltest1@'%'; Run mysql_upgrade with password protected account -Phase 1/3: Fixing table and database names -Phase 2/3: Checking and upgrading tables +Phase 1/3: Running 'mysql_fix_privilege_tables'... 
Processing databases -information_schema -mtr -mtr.global_suppressions OK -mtr.test_suppressions OK mysql mysql.column_stats OK mysql.columns_priv OK @@ -121,9 +118,15 @@ mysql.time_zone_name OK mysql.time_zone_transition OK mysql.time_zone_transition_type OK mysql.user OK +Phase 2/3: Fixing table and database names +Phase 3/3: Checking and upgrading tables +Processing databases +information_schema +mtr +mtr.global_suppressions OK +mtr.test_suppressions OK performance_schema test -Phase 3/3: Running 'mysql_fix_privilege_tables'... OK DROP USER mysqltest1@'%'; Version check failed. Got the following error when calling the 'mysql' command line client @@ -133,13 +136,8 @@ Run mysql_upgrade with a non existing server socket mysqlcheck: Got error: 2005: Unknown MySQL server host 'not_existing_host' (errno) when trying to connect FATAL ERROR: Upgrade failed set GLOBAL sql_mode='STRICT_ALL_TABLES,ANSI_QUOTES,NO_ZERO_DATE'; -Phase 1/3: Fixing table and database names -Phase 2/3: Checking and upgrading tables +Phase 1/3: Running 'mysql_fix_privilege_tables'... Processing databases -information_schema -mtr -mtr.global_suppressions OK -mtr.test_suppressions OK mysql mysql.column_stats OK mysql.columns_priv OK @@ -169,9 +167,15 @@ mysql.time_zone_name OK mysql.time_zone_transition OK mysql.time_zone_transition_type OK mysql.user OK +Phase 2/3: Fixing table and database names +Phase 3/3: Checking and upgrading tables +Processing databases +information_schema +mtr +mtr.global_suppressions OK +mtr.test_suppressions OK performance_schema test -Phase 3/3: Running 'mysql_fix_privilege_tables'... 
OK set GLOBAL sql_mode=default; # @@ -182,13 +186,8 @@ CREATE PROCEDURE testproc() BEGIN END; UPDATE mysql.proc SET character_set_client = NULL WHERE name LIKE 'testproc'; UPDATE mysql.proc SET collation_connection = NULL WHERE name LIKE 'testproc'; UPDATE mysql.proc SET db_collation = NULL WHERE name LIKE 'testproc'; -Phase 1/3: Fixing table and database names -Phase 2/3: Checking and upgrading tables +Phase 1/3: Running 'mysql_fix_privilege_tables'... Processing databases -information_schema -mtr -mtr.global_suppressions OK -mtr.test_suppressions OK mysql mysql.column_stats OK mysql.columns_priv OK @@ -218,9 +217,15 @@ mysql.time_zone_name OK mysql.time_zone_transition OK mysql.time_zone_transition_type OK mysql.user OK +Phase 2/3: Fixing table and database names +Phase 3/3: Checking and upgrading tables +Processing databases +information_schema +mtr +mtr.global_suppressions OK +mtr.test_suppressions OK performance_schema test -Phase 3/3: Running 'mysql_fix_privilege_tables'... OK CALL testproc(); DROP PROCEDURE testproc; @@ -234,13 +239,8 @@ WARNING: NULL values of the 'db_collation' column ('mysql.proc' table) have been GRANT USAGE ON *.* TO 'user3'@'%'; GRANT ALL PRIVILEGES ON `roelt`.`test2` TO 'user3'@'%'; Run mysql_upgrade with all privileges on a user -Phase 1/3: Fixing table and database names -Phase 2/3: Checking and upgrading tables +Phase 1/3: Running 'mysql_fix_privilege_tables'... Processing databases -information_schema -mtr -mtr.global_suppressions OK -mtr.test_suppressions OK mysql mysql.column_stats OK mysql.columns_priv OK @@ -270,9 +270,15 @@ mysql.time_zone_name OK mysql.time_zone_transition OK mysql.time_zone_transition_type OK mysql.user OK +Phase 2/3: Fixing table and database names +Phase 3/3: Checking and upgrading tables +Processing databases +information_schema +mtr +mtr.global_suppressions OK +mtr.test_suppressions OK performance_schema test -Phase 3/3: Running 'mysql_fix_privilege_tables'... 
OK SHOW GRANTS FOR 'user3'@'%'; Grants for user3@% @@ -280,8 +286,38 @@ GRANT USAGE ON *.* TO 'user3'@'%' GRANT ALL PRIVILEGES ON `roelt`.`test2` TO 'user3'@'%' DROP USER 'user3'@'%'; End of 5.1 tests -The --upgrade-system-tables option was used, databases won't be touched. -Phase 3/3: Running 'mysql_fix_privilege_tables'... +The --upgrade-system-tables option was used, user tables won't be touched. +Phase 1/3: Running 'mysql_fix_privilege_tables'... +Processing databases +mysql +mysql.column_stats OK +mysql.columns_priv OK +mysql.db OK +mysql.event OK +mysql.func OK +mysql.gtid_slave_pos OK +mysql.help_category OK +mysql.help_keyword OK +mysql.help_relation OK +mysql.help_topic OK +mysql.host OK +mysql.index_stats OK +mysql.innodb_index_stats OK +mysql.innodb_table_stats OK +mysql.plugin OK +mysql.proc OK +mysql.procs_priv OK +mysql.proxies_priv OK +mysql.roles_mapping OK +mysql.servers OK +mysql.table_stats OK +mysql.tables_priv OK +mysql.time_zone OK +mysql.time_zone_leap_second OK +mysql.time_zone_name OK +mysql.time_zone_transition OK +mysql.time_zone_transition_type OK +mysql.user OK OK # # Bug#11827359 60223: MYSQL_UPGRADE PROBLEM WITH OPTION @@ -289,13 +325,8 @@ OK # # Droping the previously created mysql_upgrade_info file.. # Running mysql_upgrade with --skip-write-binlog.. -Phase 1/3: Fixing table and database names -Phase 2/3: Checking and upgrading tables +Phase 1/3: Running 'mysql_fix_privilege_tables'... Processing databases -information_schema -mtr -mtr.global_suppressions OK -mtr.test_suppressions OK mysql mysql.column_stats OK mysql.columns_priv OK @@ -325,9 +356,15 @@ mysql.time_zone_name OK mysql.time_zone_transition OK mysql.time_zone_transition_type OK mysql.user OK +Phase 2/3: Fixing table and database names +Phase 3/3: Checking and upgrading tables +Processing databases +information_schema +mtr +mtr.global_suppressions OK +mtr.test_suppressions OK performance_schema test -Phase 3/3: Running 'mysql_fix_privilege_tables'... 
OK # # MDEV-4332 Increase username length from 16 characters @@ -341,13 +378,8 @@ GRANT INSERT ON mysql.user TO very_long_user_name_number_2; GRANT UPDATE (User) ON mysql.db TO very_long_user_name_number_1; GRANT UPDATE (User) ON mysql.db TO very_long_user_name_number_2; CREATE PROCEDURE test.pr() BEGIN END; -Phase 1/3: Fixing table and database names -Phase 2/3: Checking and upgrading tables +Phase 1/3: Running 'mysql_fix_privilege_tables'... Processing databases -information_schema -mtr -mtr.global_suppressions OK -mtr.test_suppressions OK mysql mysql.column_stats OK mysql.columns_priv OK @@ -377,9 +409,15 @@ mysql.time_zone_name OK mysql.time_zone_transition OK mysql.time_zone_transition_type OK mysql.user OK +Phase 2/3: Fixing table and database names +Phase 3/3: Checking and upgrading tables +Processing databases +information_schema +mtr +mtr.global_suppressions OK +mtr.test_suppressions OK performance_schema test -Phase 3/3: Running 'mysql_fix_privilege_tables'... OK SELECT definer FROM mysql.proc WHERE db = 'test' AND name = 'pr'; definer diff --git a/mysql-test/r/mysql_upgrade_no_innodb.result b/mysql-test/r/mysql_upgrade_no_innodb.result index ad6f4617657..5a1cd6b9e7d 100644 --- a/mysql-test/r/mysql_upgrade_no_innodb.result +++ b/mysql-test/r/mysql_upgrade_no_innodb.result @@ -1,3 +1,45 @@ -The --upgrade-system-tables option was used, databases won't be touched. -Phase 3/3: Running 'mysql_fix_privilege_tables'... +The --upgrade-system-tables option was used, user tables won't be touched. +Phase 1/3: Running 'mysql_fix_privilege_tables'... 
+Processing databases +mysql +mysql.column_stats OK +mysql.columns_priv OK +mysql.db OK +mysql.event OK +mysql.func OK +mysql.gtid_slave_pos OK +mysql.help_category OK +mysql.help_keyword OK +mysql.help_relation OK +mysql.help_topic OK +mysql.host OK +mysql.index_stats OK +mysql.innodb_index_stats +Error : Unknown storage engine 'InnoDB' +error : Corrupt +mysql.innodb_table_stats +Error : Unknown storage engine 'InnoDB' +error : Corrupt +mysql.plugin OK +mysql.proc OK +mysql.procs_priv OK +mysql.proxies_priv OK +mysql.roles_mapping OK +mysql.servers OK +mysql.table_stats OK +mysql.tables_priv OK +mysql.time_zone OK +mysql.time_zone_leap_second OK +mysql.time_zone_name OK +mysql.time_zone_transition OK +mysql.time_zone_transition_type OK +mysql.user OK + +Repairing tables +mysql.innodb_index_stats +Error : Unknown storage engine 'InnoDB' +error : Corrupt +mysql.innodb_table_stats +Error : Unknown storage engine 'InnoDB' +error : Corrupt OK diff --git a/mysql-test/r/mysql_upgrade_ssl.result b/mysql-test/r/mysql_upgrade_ssl.result index 87b7d51e8b7..70801da20ea 100644 --- a/mysql-test/r/mysql_upgrade_ssl.result +++ b/mysql-test/r/mysql_upgrade_ssl.result @@ -1,13 +1,8 @@ # # Bug#55672 mysql_upgrade dies with internal error # -Phase 1/3: Fixing table and database names -Phase 2/3: Checking and upgrading tables +Phase 1/3: Running 'mysql_fix_privilege_tables'... Processing databases -information_schema -mtr -mtr.global_suppressions OK -mtr.test_suppressions OK mysql mysql.column_stats OK mysql.columns_priv OK @@ -37,7 +32,13 @@ mysql.time_zone_name OK mysql.time_zone_transition OK mysql.time_zone_transition_type OK mysql.user OK +Phase 2/3: Fixing table and database names +Phase 3/3: Checking and upgrading tables +Processing databases +information_schema +mtr +mtr.global_suppressions OK +mtr.test_suppressions OK performance_schema test -Phase 3/3: Running 'mysql_fix_privilege_tables'... 
OK diff --git a/mysql-test/r/mysqld--help.result b/mysql-test/r/mysqld--help.result index dec14751d0d..99e1a86aa68 100644 --- a/mysql-test/r/mysqld--help.result +++ b/mysql-test/r/mysqld--help.result @@ -281,6 +281,11 @@ The following options may be given as the first argument: The default size of key cache blocks --key-cache-division-limit=# The minimum percentage of warm blocks in key cache + --key-cache-file-hash-size=# + Number of hash buckets for open and changed files. If + you have a lot of MyISAM files open you should increase + this for faster flush of changes. A good value is + probably 1/10 of number of possible open MyISAM files. --key-cache-segments=# The number of segments in a key cache -L, --language=name Client error messages in given language. May be given as @@ -1009,8 +1014,8 @@ The following options may be given as the first argument: created to handle remaining clients. --thread-stack=# The stack size for each thread --time-format=name The TIME format (ignored) - --timed-mutexes Specify whether to time mutexes (only InnoDB mutexes are - currently supported) + --timed-mutexes Specify whether to time mutexes. Deprecated, has no + effect. 
--tmp-table-size=# If an internal in-memory temporary table exceeds this size, MySQL will automatically convert it to an on-disk MyISAM or Aria table @@ -1211,6 +1216,7 @@ key-buffer-size 134217728 key-cache-age-threshold 300 key-cache-block-size 1024 key-cache-division-limit 100 +key-cache-file-hash-size 512 key-cache-segments 0 large-pages FALSE lc-messages en_US diff --git a/mysql-test/r/mysqltest.result b/mysql-test/r/mysqltest.result index 7228daa2712..e258b1d156f 100644 --- a/mysql-test/r/mysqltest.result +++ b/mysql-test/r/mysqltest.result @@ -680,6 +680,9 @@ txt b is b and more is more txt a is a and less is more +sflfdt 'ABCDfF bbddff h' bs txt; +txt +ABCDfF bbddff h create table t2 ( a char(10)); garbage; ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'garbage' at line 1 diff --git a/mysql-test/r/order_by_innodb.result b/mysql-test/r/order_by_innodb.result new file mode 100644 index 00000000000..3c6c4053741 --- /dev/null +++ b/mysql-test/r/order_by_innodb.result @@ -0,0 +1,13 @@ +drop table if exists t0,t1,t2,t3; +# +# MDEV-6434: Wrong result (extra rows) with ORDER BY, multiple-column index, InnoDB +# +CREATE TABLE t1 (a INT, b INT, c INT, d TEXT, KEY idx(a,b,c)) ENGINE=InnoDB; +INSERT INTO t1 (a,c) VALUES +(8, 9),(8, 10),(13, 15),(16, 17),(16, 18),(16, 19),(20, 21), +(20, 22),(20, 24),(20, 25),(20, 26),(20, 27),(20, 28); +SELECT * FROM t1 WHERE a = 8 AND (b = 1 OR b IS NULL) ORDER BY c; +a b c d +8 NULL 9 NULL +8 NULL 10 NULL +DROP TABLE t1; diff --git a/mysql-test/r/order_by_sortkey.result b/mysql-test/r/order_by_sortkey.result index 717780f0af2..c1d9609eb47 100644 --- a/mysql-test/r/order_by_sortkey.result +++ b/mysql-test/r/order_by_sortkey.result @@ -45,6 +45,7 @@ FLUSH STATUS; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -153,6 +154,7 @@ f0 f1 f2 SHOW 
SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 100 Sort_scan 1 diff --git a/mysql-test/r/partition.result b/mysql-test/r/partition.result index b8011656415..233494238a5 100644 --- a/mysql-test/r/partition.result +++ b/mysql-test/r/partition.result @@ -2562,6 +2562,50 @@ id id2 dob address city hours_worked_per_week weeks_worked_last_year 16 16 1949-11-07 address16 city16 40 52 50 50 1923-09-08 address50 city50 40 52 drop table t1; +# +# MDEV-6322: The PARTITION engine can return wrong query results +# +CREATE TABLE t1 ( +CustomerID varchar(5) DEFAULT NULL, +CompanyName varchar(40) DEFAULT NULL, +ContactName varchar(30) DEFAULT NULL, +ContactTitle varchar(30) DEFAULT NULL, +Address varchar(60) DEFAULT NULL, +City varchar(15) DEFAULT NULL, +Region varchar(15) DEFAULT NULL, +PostalCode varchar(10) DEFAULT NULL, +Country varchar(15) NOT NULL, +Phone varchar(24) DEFAULT NULL, +Fax varchar(24) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +PARTITION BY LIST COLUMNS(Country) +(PARTITION p1 VALUES IN ('Germany','Austria','Switzerland','Poland'), +PARTITION p2 VALUES IN ('USA','Canada','Mexico'), +PARTITION p3 VALUES IN ('Spain','Portugal','Italy'), +PARTITION p4 VALUES IN ('UK','Ireland'), +PARTITION p5 VALUES IN ('France','Belgium'), +PARTITION p6 VALUES IN ('Sweden','Finland','Denmark','Norway'), +PARTITION p7 VALUES IN ('Venezuela','Argentina','Brazil') +); +INSERT INTO t1 (CustomerID, City, Country) VALUES +('ANATR','México D.F','Mexico'), +('ANTON','México D.F','Mexico'), +('BOTTM','Tsawassen','Canada'), +('CENTC','México D.F','Mexico'), +('GREAL','Eugene','USA'), +('HUNGC','Elgin','USA'), +('LAUGB','Vancouver','Canada'), +('LAZYK','Walla Walla','USA'), +('LETSS','San Francisco','USA'), +('LONEP','Portland','USA'); +SELECT * FROM t1 WHERE Country = 'USA'; +CustomerID CompanyName ContactName ContactTitle Address City Region PostalCode Country Phone Fax +GREAL NULL NULL NULL NULL Eugene 
NULL NULL USA NULL NULL +HUNGC NULL NULL NULL NULL Elgin NULL NULL USA NULL NULL +LAZYK NULL NULL NULL NULL Walla Walla NULL NULL USA NULL NULL +LETSS NULL NULL NULL NULL San Francisco NULL NULL USA NULL NULL +LONEP NULL NULL NULL NULL Portland NULL NULL USA NULL NULL +DROP TABLE t1; CREATE TABLE t1 ( d DATE NOT NULL) PARTITION BY RANGE( YEAR(d) ) ( PARTITION p0 VALUES LESS THAN (1960), diff --git a/mysql-test/r/partition_innodb.result b/mysql-test/r/partition_innodb.result index 82ccbe9c6a8..92c9c01db2d 100644 --- a/mysql-test/r/partition_innodb.result +++ b/mysql-test/r/partition_innodb.result @@ -694,6 +694,34 @@ count(*) drop table t3; drop table t1,t2; # +# MySQL Bug#71095: Wrong results with PARTITION BY LIST COLUMNS() +# +create table t1(c1 int, c2 int, c3 int, c4 int, +primary key(c1,c2)) engine=InnoDB +partition by list columns(c2) +(partition p1 values in (1,2) engine=InnoDB, +partition p2 values in (3,4) engine=InnoDB); +insert into t1 values (1,1,1,1),(2,3,1,1); +select * from t1 where c1=2 and c2=3; +c1 c2 c3 c4 +2 3 1 1 +drop table t1; +# +# MySQL Bug#72803: Wrong "Impossible where" with LIST partitioning +# also MDEV-6240: Wrong "Impossible where" with LIST partitioning +# +CREATE TABLE t1 ( d DATE) ENGINE = InnoDB +PARTITION BY LIST COLUMNS (d) +( +PARTITION p0 VALUES IN ('1990-01-01','1991-01-01'), +PARTITION p1 VALUES IN ('1981-01-01') +); +INSERT INTO t1 (d) VALUES ('1991-01-01'); +SELECT * FROM t1 WHERE d = '1991-01-01'; +d +1991-01-01 +DROP TABLE t1; +# # MDEV-5963: InnoDB: Assertion failure in file row0sel.cc line 2503, # Failing assertion: 0 with "key ptr now exceeds key end by 762 bytes" # (independent testcase for Oracle Bug#13947868) diff --git a/mysql-test/r/partition_pruning.result b/mysql-test/r/partition_pruning.result index 0a4cf9932c0..e52c2c7d886 100644 --- a/mysql-test/r/partition_pruning.result +++ b/mysql-test/r/partition_pruning.result @@ -3302,6 +3302,120 @@ id select_type table partitions type possible_keys key key_len ref 
rows Extra 1 SIMPLE t1 p0,p1,p2 ALL NULL NULL NULL NULL 100 Using where drop table t0, t1; # +# Bug#71095: Wrong results with PARTITION BY LIST COLUMNS() +# +CREATE TABLE t1 +(c1 int, +c2 int, +c3 int, +c4 int, +PRIMARY KEY (c1,c2)) +PARTITION BY LIST COLUMNS (c2) +(PARTITION p1 VALUES IN (1,2), +PARTITION p2 VALUES IN (3,4)); +INSERT INTO t1 VALUES (1, 1, 1, 1), (2, 3, 1, 1); +INSERT INTO t1 VALUES (1, 2, 1, 1), (2, 4, 1, 1); +SELECT * FROM t1 WHERE c1 = 1 AND c2 < 1; +c1 c2 c3 c4 +SELECT * FROM t1 WHERE c1 = 1 AND c2 <= 1; +c1 c2 c3 c4 +1 1 1 1 +SELECT * FROM t1 WHERE c1 = 1 AND c2 = 1; +c1 c2 c3 c4 +1 1 1 1 +SELECT * FROM t1 WHERE c1 = 1 AND c2 >= 1; +c1 c2 c3 c4 +1 1 1 1 +1 2 1 1 +SELECT * FROM t1 WHERE c1 = 1 AND c2 > 1; +c1 c2 c3 c4 +1 2 1 1 +SELECT * FROM t1 WHERE c1 = 1 AND c2 < 3; +c1 c2 c3 c4 +1 1 1 1 +1 2 1 1 +SELECT * FROM t1 WHERE c1 = 1 AND c2 <= 3; +c1 c2 c3 c4 +1 1 1 1 +1 2 1 1 +SELECT * FROM t1 WHERE c1 = 2 AND c2 <= 3; +c1 c2 c3 c4 +2 3 1 1 +SELECT * FROM t1 WHERE c1 = 2 AND c2 = 3; +c1 c2 c3 c4 +2 3 1 1 +SELECT * FROM t1 WHERE c1 = 2 AND c2 >= 3; +c1 c2 c3 c4 +2 3 1 1 +2 4 1 1 +SELECT * FROM t1 WHERE c1 = 2 AND c2 > 3; +c1 c2 c3 c4 +2 4 1 1 +SELECT * FROM t1 WHERE c1 = 2 AND c2 < 4; +c1 c2 c3 c4 +2 3 1 1 +SELECT * FROM t1 WHERE c1 = 2 AND c2 <= 4; +c1 c2 c3 c4 +2 3 1 1 +2 4 1 1 +SELECT * FROM t1 WHERE c1 = 2 AND c2 = 4; +c1 c2 c3 c4 +2 4 1 1 +SELECT * FROM t1 WHERE c1 = 2 AND c2 >= 4; +c1 c2 c3 c4 +2 4 1 1 +SELECT * FROM t1 WHERE c1 = 2 AND c2 > 4; +c1 c2 c3 c4 +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 AND c2 < 1; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 AND c2 <= 1; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 p1 range PRIMARY PRIMARY 8 NULL 1 Using where +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 
AND c2 = 1; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 p1 const PRIMARY PRIMARY 8 const,const 1 +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 AND c2 >= 1; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 p1,p2 range PRIMARY PRIMARY 8 NULL 2 Using where +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 AND c2 > 1; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 p1,p2 range PRIMARY PRIMARY 8 NULL 2 Using where +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 AND c2 < 3; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 p1 range PRIMARY PRIMARY 8 NULL 1 Using where +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 AND c2 <= 3; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 p1,p2 range PRIMARY PRIMARY 8 NULL 2 Using where +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 <= 3; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 p1,p2 range PRIMARY PRIMARY 8 NULL 2 Using where +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 = 3; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 p2 const PRIMARY PRIMARY 8 const,const 1 +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 >= 3; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 p2 range PRIMARY PRIMARY 8 NULL 1 Using where +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 > 3; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 p2 range PRIMARY PRIMARY 8 NULL 1 Using where +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 < 4; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 p1,p2 range PRIMARY PRIMARY 8 NULL 2 Using where +EXPLAIN PARTITIONS SELECT * FROM t1 
WHERE c1 = 2 AND c2 <= 4; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 p1,p2 range PRIMARY PRIMARY 8 NULL 2 Using where +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 = 4; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 p2 const PRIMARY PRIMARY 8 const,const 1 +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 >= 4; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 p2 range PRIMARY PRIMARY 8 NULL 1 Using where +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 > 4; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables +DROP TABLE t1; +# # MDEV-6239: Partition pruning is not working as expected in an inner query # create table t1 diff --git a/mysql-test/r/plugin_loaderr.result b/mysql-test/r/plugin_loaderr.result index 95e5ec794d2..d1189217355 100644 --- a/mysql-test/r/plugin_loaderr.result +++ b/mysql-test/r/plugin_loaderr.result @@ -8,3 +8,6 @@ PLUGIN_TYPE STORAGE ENGINE PLUGIN_LIBRARY NULL PLUGIN_LIBRARY_VERSION NULL LOAD_OPTION ON +# +# MDEV-6351 --plugin=force has no effect for built-in plugins +# diff --git a/mysql-test/r/rpl_mysqldump_slave.result b/mysql-test/r/rpl_mysqldump_slave.result index 4b29ff99f61..9d2fe860f47 100644 --- a/mysql-test/r/rpl_mysqldump_slave.result +++ b/mysql-test/r/rpl_mysqldump_slave.result @@ -4,18 +4,59 @@ include/master-slave.inc # New --dump-slave, --apply-slave-statements functionality # use test; +-- SET GLOBAL gtid_slave_pos=''; CHANGE MASTER '' TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=BINLOG_START; STOP ALL SLAVES; +-- SET GLOBAL gtid_slave_pos=''; CHANGE MASTER '' TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=BINLOG_START; START ALL SLAVES; STOP ALL SLAVES; +-- SET GLOBAL gtid_slave_pos=''; CHANGE MASTER '' TO 
MASTER_HOST='127.0.0.1', MASTER_PORT=MASTER_MYPORT, MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=BINLOG_START; START ALL SLAVES; start slave; Warnings: Note 1254 Slave is already running +-- SET GLOBAL gtid_slave_pos=''; CHANGE MASTER '' TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=BINLOG_START; start slave; Warnings: Note 1254 Slave is already running +*** Test mysqldump --dump-slave GTID functionality. +SET gtid_seq_no = 1000; +CREATE TABLE t1 (a INT PRIMARY KEY); +DROP TABLE t1; +CREATE TABLE t2 (a INT PRIMARY KEY); +DROP TABLE t2; + +1. --dump-slave=1 + +SET GLOBAL gtid_slave_pos='0-1-1001'; +CHANGE MASTER '' TO MASTER_USE_GTID=slave_pos; +-- CHANGE MASTER '' TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=BINLOG_START; + +2. --dump-slave=2 + +-- SET GLOBAL gtid_slave_pos='0-1-1001'; +-- CHANGE MASTER '' TO MASTER_USE_GTID=slave_pos; +-- CHANGE MASTER '' TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=BINLOG_START; +*** Test mysqldump --master-data GTID functionality. + +1. --master-data=1 + +-- CHANGE MASTER TO MASTER_LOG_FILE='slave-bin.000001', MASTER_LOG_POS=BINLOG_START; +CHANGE MASTER TO MASTER_USE_GTID=slave_pos; +SET GLOBAL gtid_slave_pos='0-2-1003'; + +2. --master-data=2 + +-- CHANGE MASTER TO MASTER_LOG_FILE='slave-bin.000001', MASTER_LOG_POS=BINLOG_START; +-- CHANGE MASTER TO MASTER_USE_GTID=slave_pos; +-- SET GLOBAL gtid_slave_pos='0-2-1003'; + +3. 
--master-data --single-transaction + +-- CHANGE MASTER TO MASTER_LOG_FILE='slave-bin.000001', MASTER_LOG_POS=BINLOG_START; +CHANGE MASTER TO MASTER_USE_GTID=slave_pos; +SET GLOBAL gtid_slave_pos='0-2-1003'; include/rpl_end.inc diff --git a/mysql-test/r/single_delete_update.result b/mysql-test/r/single_delete_update.result index d4534a2c2d8..9332effeb56 100644 --- a/mysql-test/r/single_delete_update.result +++ b/mysql-test/r/single_delete_update.result @@ -18,6 +18,7 @@ NULL 15 SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -36,6 +37,7 @@ DELETE FROM t2 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -71,6 +73,7 @@ NULL 15 SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 5 Sort_scan 1 @@ -89,6 +92,7 @@ DELETE FROM t2 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 8 Sort_scan 1 @@ -121,6 +125,7 @@ a b c d SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 1 Sort_scan 1 @@ -139,6 +144,7 @@ DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 1 Sort_scan 1 @@ -175,6 +181,7 @@ a b c d SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -193,6 +200,7 @@ DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -223,6 +231,7 
@@ a b c d SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 1 Sort_scan 1 @@ -241,6 +250,7 @@ DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 1 Sort_scan 1 @@ -269,6 +279,7 @@ a b c d SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 1 Sort_scan 1 @@ -287,6 +298,7 @@ DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 1 Sort_scan 1 @@ -318,6 +330,7 @@ NULL 13 13 SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 1 Sort_rows 4 Sort_scan 0 @@ -341,6 +354,7 @@ DELETE FROM t2 WHERE key1 < 13 or key2 < 14 ORDER BY key1; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 1 Sort_rows 4 Sort_scan 0 @@ -378,6 +392,7 @@ NULL 14 SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -396,6 +411,7 @@ DELETE FROM t2 WHERE i > 10 AND i <= 18 ORDER BY i DESC LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -431,6 +447,7 @@ a b c SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 5 Sort_scan 1 @@ -449,6 +466,7 @@ DELETE FROM t2 ORDER BY a, b DESC LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 16 Sort_scan 1 @@ -493,6 +511,7 @@ a b c SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 
+Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -517,6 +536,7 @@ a b c SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -535,6 +555,7 @@ DELETE FROM t2 ORDER BY a DESC, b DESC LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -571,6 +592,7 @@ NULL 15 SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -589,6 +611,7 @@ UPDATE t2 SET a = 10 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -629,6 +652,7 @@ NULL 15 SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 5 Sort_scan 1 @@ -647,6 +671,7 @@ UPDATE t2 SET a = 10 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 5 Sort_scan 1 @@ -684,6 +709,7 @@ a b c d SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 1 Sort_scan 1 @@ -702,6 +728,7 @@ UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 1 Sort_scan 1 @@ -738,6 +765,7 @@ a b c d SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -756,6 +784,7 @@ UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 
@@ -786,6 +815,7 @@ a b c d SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 1 Sort_scan 1 @@ -804,6 +834,7 @@ UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 1 Sort_scan 1 @@ -833,6 +864,7 @@ a b c d SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 1 Sort_scan 1 @@ -851,6 +883,7 @@ UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 1 Sort_scan 1 @@ -883,6 +916,7 @@ NULL 13 13 SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 1 Sort_rows 4 Sort_scan 0 @@ -906,6 +940,7 @@ UPDATE t2 SET i = 123 WHERE key1 < 13 or key2 < 14 ORDER BY key1; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 1 Sort_rows 4 Sort_scan 0 @@ -947,6 +982,7 @@ NULL 14 SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -965,6 +1001,7 @@ UPDATE t2 SET a = 10 WHERE i > 10 AND i <= 18 ORDER BY i DESC LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -1005,6 +1042,7 @@ a b c SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 5 Sort_scan 1 @@ -1023,6 +1061,7 @@ UPDATE t2 SET c = 10 ORDER BY a, b DESC LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 5 Sort_scan 1 @@ -1061,6 +1100,7 @@ a b c SHOW SESSION STATUS LIKE 
'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -1085,6 +1125,7 @@ a b c SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -1103,6 +1144,7 @@ UPDATE t2 SET c = 10 ORDER BY a DESC, b DESC LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 diff --git a/mysql-test/r/stat_tables-enospc.result b/mysql-test/r/stat_tables-enospc.result new file mode 100644 index 00000000000..f0d76f04eee --- /dev/null +++ b/mysql-test/r/stat_tables-enospc.result @@ -0,0 +1,10 @@ +call mtr.add_suppression("No space left on device"); +create table t1 (a varchar(255), b varchar(255), c varchar(255)); +set use_stat_tables=PREFERABLY, optimizer_use_condition_selectivity=3; +set debug_dbug='+d,simulate_file_write_error'; +analyze table t1; +Table Op Msg_type Msg_text +test.t1 analyze Error Error writing file 'tmp-file' (Errcode: 28 "No space left on device") +test.t1 analyze status Operation failed +set debug_dbug=''; +drop table t1; diff --git a/mysql-test/r/subselect_mat.result b/mysql-test/r/subselect_mat.result index 6cc627ad16c..4103a3424e9 100644 --- a/mysql-test/r/subselect_mat.result +++ b/mysql-test/r/subselect_mat.result @@ -2108,6 +2108,43 @@ EXECUTE stmt; a DROP TABLE t1, t2; DROP VIEW v2; +# +# MDEV-6289 : Unexpected results when querying information_schema +# +CREATE TABLE t1 ( +id int(11) unsigned NOT NULL AUTO_INCREMENT, +db varchar(254) NOT NULL DEFAULT '', +PRIMARY KEY (id), +UNIQUE KEY db (db) +) DEFAULT CHARSET=utf8; +INSERT INTO t1 (db) VALUES ('mysqltest1'),('mysqltest2'),('mysqltest3'),('mysqltest4'); +drop database if exists mysqltest1; +drop database if exists mysqltest2; +drop database if exists mysqltest3; +drop database if exists mysqltest4; +create database mysqltest1; +create database mysqltest2; +create 
database mysqltest3; +create database mysqltest4; +SELECT db FROM t1 WHERE db IN (SELECT SCHEMA_NAME FROM information_schema.schemata) ORDER BY db DESC; +db +mysqltest4 +mysqltest3 +mysqltest2 +mysqltest1 +EXPLAIN EXTENDED +SELECT db FROM t1 WHERE db IN (SELECT SCHEMA_NAME FROM information_schema.schemata) ORDER BY db DESC; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2 100.00 Using temporary; Using filesort +1 PRIMARY t1 eq_ref db db 764 information_schema.schemata.SCHEMA_NAME 1 100.00 Using where; Using index +2 MATERIALIZED schemata ALL NULL NULL NULL NULL NULL NULL +Warnings: +Note 1003 select `test`.`t1`.`db` AS `db` from `test`.`t1` semi join (`information_schema`.`schemata`) where (`test`.`t1`.`db` = `information_schema`.`schemata`.`SCHEMA_NAME`) order by `test`.`t1`.`db` desc +drop table t1; +drop database mysqltest1; +drop database mysqltest2; +drop database mysqltest3; +drop database mysqltest4; # End of 5.5 tests set @subselect_mat_test_optimizer_switch_value=null; set @@optimizer_switch='materialization=on,in_to_exists=off,semijoin=off'; diff --git a/mysql-test/r/subselect_sj_mat.result b/mysql-test/r/subselect_sj_mat.result index 95dfc34777b..c6a0344c8a3 100644 --- a/mysql-test/r/subselect_sj_mat.result +++ b/mysql-test/r/subselect_sj_mat.result @@ -2148,4 +2148,41 @@ EXECUTE stmt; a DROP TABLE t1, t2; DROP VIEW v2; +# +# MDEV-6289 : Unexpected results when querying information_schema +# +CREATE TABLE t1 ( +id int(11) unsigned NOT NULL AUTO_INCREMENT, +db varchar(254) NOT NULL DEFAULT '', +PRIMARY KEY (id), +UNIQUE KEY db (db) +) DEFAULT CHARSET=utf8; +INSERT INTO t1 (db) VALUES ('mysqltest1'),('mysqltest2'),('mysqltest3'),('mysqltest4'); +drop database if exists mysqltest1; +drop database if exists mysqltest2; +drop database if exists mysqltest3; +drop database if exists mysqltest4; +create database mysqltest1; +create database mysqltest2; +create database 
mysqltest3; +create database mysqltest4; +SELECT db FROM t1 WHERE db IN (SELECT SCHEMA_NAME FROM information_schema.schemata) ORDER BY db DESC; +db +mysqltest4 +mysqltest3 +mysqltest2 +mysqltest1 +EXPLAIN EXTENDED +SELECT db FROM t1 WHERE db IN (SELECT SCHEMA_NAME FROM information_schema.schemata) ORDER BY db DESC; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2 100.00 Using temporary; Using filesort +1 PRIMARY t1 eq_ref db db 764 information_schema.schemata.SCHEMA_NAME 1 100.00 Using where; Using index +2 MATERIALIZED schemata ALL NULL NULL NULL NULL NULL NULL +Warnings: +Note 1003 select `test`.`t1`.`db` AS `db` from `test`.`t1` semi join (`information_schema`.`schemata`) where (`test`.`t1`.`db` = `information_schema`.`schemata`.`SCHEMA_NAME`) order by `test`.`t1`.`db` desc +drop table t1; +drop database mysqltest1; +drop database mysqltest2; +drop database mysqltest3; +drop database mysqltest4; # End of 5.5 tests diff --git a/mysql-test/r/table_options-5867.result b/mysql-test/r/table_options-5867.result new file mode 100644 index 00000000000..21041c7c5c3 --- /dev/null +++ b/mysql-test/r/table_options-5867.result @@ -0,0 +1,37 @@ +install soname 'ha_example'; +set sql_mode='ignore_bad_table_options'; +create table t1 ( +a int complex='c,f,f,f' invalid=3 +) engine=example ull=10000 str='dskj' one_or_two='one' yesno=0 +foobar=barfoo; +Warnings: +Warning 1911 Unknown option 'invalid' +Warning 1911 Unknown option 'foobar' +create table t2 (a int, key (a) some_option=2014); +Warnings: +Warning 1911 Unknown option 'some_option' +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL `complex`='c,f,f,f' `invalid`=3 +) ENGINE=EXAMPLE DEFAULT CHARSET=latin1 `ull`=10000 `str`='dskj' `one_or_two`='one' `yesno`=0 `foobar`=barfoo `VAROPT`='5' +show create table t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `a` int(11) DEFAULT NULL, + KEY `a` (`a`) 
`some_option`=2014 +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +set sql_mode=''; +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL `complex`='c,f,f,f' /* `invalid`=3 */ +) ENGINE=EXAMPLE DEFAULT CHARSET=latin1 `ull`=10000 `str`='dskj' `one_or_two`='one' `yesno`=0 /* `foobar`=barfoo */ `VAROPT`='5' +show create table t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `a` int(11) DEFAULT NULL, + KEY `a` (`a`) /* `some_option`=2014 */ +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +drop table t1, t2; +uninstall soname 'ha_example'; diff --git a/mysql-test/r/type_bit.result b/mysql-test/r/type_bit.result index e6f4db26c23..639a97be27b 100644 --- a/mysql-test/r/type_bit.result +++ b/mysql-test/r/type_bit.result @@ -722,7 +722,12 @@ DROP TABLE t1; CREATE TABLE IF NOT EXISTS t1 ( f1 bit(2) NOT NULL default b'' ) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_general_ci; -ERROR 42000: Invalid default value for 'f1' +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` bit(2) NOT NULL DEFAULT b'0' +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_general_ci +DROP TABLE t1; create table t1bit7 (a1 bit(7) not null) engine=MyISAM; create table t2bit7 (b1 bit(7)) engine=MyISAM; insert into t1bit7 values (b'1100000'); diff --git a/mysql-test/r/type_bit_innodb.result b/mysql-test/r/type_bit_innodb.result index 9bdd8658690..80fc942e77c 100644 --- a/mysql-test/r/type_bit_innodb.result +++ b/mysql-test/r/type_bit_innodb.result @@ -413,3 +413,12 @@ a ` drop table t1; End of 5.0 tests +create table t1(f1 bit(2) not null default b'10',f2 bit(14) not null default b'11110000111100'); +insert into t1 (f1) values (default); +insert into t1 values (b'',b''),('',''); +select hex(f1), hex(f2) from t1; +hex(f1) hex(f2) +2 3C3C +0 0 +0 0 +drop table t1; diff --git a/mysql-test/r/type_decimal.result b/mysql-test/r/type_decimal.result index fa36e9b5567..f8649f030bb 100644 --- a/mysql-test/r/type_decimal.result +++ 
b/mysql-test/r/type_decimal.result @@ -810,10 +810,10 @@ c1 drop table t1; SELECT 1 % .123456789123456789123456789123456789123456789123456789123456789123456789123456789 AS '%'; % -0.012345687012345687012345687012345687012345687012345687012345687012345687000000000 +0.012345687012345687012345687012 SELECT MOD(1, .123456789123456789123456789123456789123456789123456789123456789123456789123456789) AS 'MOD()'; MOD() -0.012345687012345687012345687012345687012345687012345687012345687012345687000000000 +0.012345687012345687012345687012 create table t1 (f1 decimal(6,6),f2 decimal(6,6) zerofill); insert into t1 values (-0.123456,0.123456); select group_concat(f1),group_concat(f2) from t1; diff --git a/mysql-test/r/type_newdecimal.result b/mysql-test/r/type_newdecimal.result index 5b3594fe503..fb10e65c0ce 100644 --- a/mysql-test/r/type_newdecimal.result +++ b/mysql-test/r/type_newdecimal.result @@ -703,7 +703,7 @@ select .7777777777777777777777777777777777777 * 777777777777777777.777777777777777777700000000000 select .7777777777777777777777777777777777777 - 0.1; .7777777777777777777777777777777777777 - 0.1 -0.6777777777777777777777777777777777777 +0.677777777777777777777777777778 select .343434343434343434 + .343434343434343434; .343434343434343434 + .343434343434343434 0.686868686868686868 @@ -1840,7 +1840,7 @@ Warnings: Note 1265 Data truncated for column 'c1' at row 4 DESC t2; Field Type Null Key Default Extra -c1 decimal(32,30) YES NULL +c1 decimal(33,30) YES NULL DROP TABLE t1,t2; CREATE TABLE t1 (a DECIMAL(30,30)); INSERT INTO t1 VALUES (0.1),(0.2),(0.3); @@ -1851,7 +1851,7 @@ Note 1265 Data truncated for column 'c1' at row 2 Note 1265 Data truncated for column 'c1' at row 3 DESC t2; Field Type Null Key Default Extra -c1 decimal(34,0) YES NULL +c1 decimal(33,30) YES NULL DROP TABLE t1,t2; CREATE TABLE t1 (a DECIMAL(30,30)); INSERT INTO t1 VALUES (0.1),(0.2),(0.3); diff --git a/mysql-test/r/union.result b/mysql-test/r/union.result index 6d99cad30f0..4ecac34d9fa 100644 --- 
a/mysql-test/r/union.result +++ b/mysql-test/r/union.result @@ -1876,6 +1876,40 @@ SELECT(SELECT 1 AS a FROM dual ORDER BY a DESC LIMIT 1) AS dev; dev 1 # +# Bug #17059925 : UNIONS COMPUTES ROWS_EXAMINED INCORRECTLY +# +SET @old_slow_query_log= @@global.slow_query_log; +SET @old_log_output= @@global.log_output; +SET @old_long_query_time= @@long_query_time; +SET GLOBAL log_output= "TABLE"; +SET GLOBAL slow_query_log= ON; +SET SESSION long_query_time= 0; +CREATE TABLE t17059925 (a INT); +CREATE TABLE t2 (b INT); +CREATE TABLE t3 (c INT); +INSERT INTO t17059925 VALUES (1), (2), (3); +INSERT INTO t2 VALUES (4), (5), (6); +INSERT INTO t3 VALUES (7), (8), (9); +TRUNCATE table mysql.slow_log; +SELECT * FROM t17059925 UNION SELECT * FROM t2 UNION SELECT * FROM t3; +a +1 +2 +3 +4 +5 +6 +7 +8 +9 +SELECT sql_text, rows_examined FROM mysql.slow_log WHERE sql_text LIKE '%SELECT%t17059925%'; +sql_text rows_examined +SELECT * FROM t17059925 UNION SELECT * FROM t2 UNION SELECT * FROM t3 18 +DROP TABLE t17059925, t2, t3; +SET @@long_query_time= @old_long_query_time; +SET @@global.log_output= @old_log_output; +SET @@global.slow_query_log= @old_slow_query_log; +# # lp:1010729: Unexpected syntax error from UNION # (bug #54382) with single-table join nest # diff --git a/mysql-test/r/variables.result b/mysql-test/r/variables.result index 2ba64ed3003..a93e3a210ed 100644 --- a/mysql-test/r/variables.result +++ b/mysql-test/r/variables.result @@ -189,6 +189,8 @@ select @@concurrent_insert; @@concurrent_insert AUTO set global timed_mutexes=ON; +Warnings: +Warning 1287 '@@timed_mutexes' is deprecated and will be removed in a future release. show variables like 'timed_mutexes'; Variable_name Value timed_mutexes ON @@ -196,6 +198,8 @@ select * from information_schema.session_variables where variable_name like 'tim VARIABLE_NAME VARIABLE_VALUE TIMED_MUTEXES ON set global timed_mutexes=0; +Warnings: +Warning 1287 '@@timed_mutexes' is deprecated and will be removed in a future release. 
show variables like 'timed_mutexes'; Variable_name Value timed_mutexes OFF diff --git a/mysql-test/r/view.result b/mysql-test/r/view.result index 64b329e9e01..ca08c53cabe 100644 --- a/mysql-test/r/view.result +++ b/mysql-test/r/view.result @@ -4789,6 +4789,45 @@ DROP DATABASE IF EXISTS nodb; CREATE VIEW nodb.a AS SELECT 1; ERROR 42000: Unknown database 'nodb' # +# BUG#14117018 - MYSQL SERVER CREATES INVALID VIEW DEFINITION +# BUG#18405221 - SHOW CREATE VIEW OUTPUT INCORRECT +# +CREATE VIEW v1 AS (SELECT '' FROM DUAL); +CREATE VIEW v2 AS (SELECT 'BUG#14117018' AS col1 FROM DUAL) UNION ALL +(SELECT '' FROM DUAL); +CREATE VIEW v3 AS (SELECT 'BUG#14117018' AS col1 FROM DUAL) UNION ALL +(SELECT '' FROM DUAL) UNION ALL +(SELECT '' FROM DUAL); +CREATE VIEW v4 AS (SELECT 'BUG#14117018' AS col1 FROM DUAL) UNION ALL +(SELECT '' AS col2 FROM DUAL) UNION ALL +(SELECT '' FROM DUAL); +CREATE VIEW v5 AS (SELECT 'buggy' AS col1, 'fix' as col2 FROM DUAL) UNION ALL +(SELECT 'buggy' as a, 'fix' as a FROM DUAL); +# Name for the column in select1 is set properly with or +# without this fix. +SHOW CREATE VIEW v1; +View Create View character_set_client collation_connection +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS (select '' AS `Name_exp_1`) latin1 latin1_swedish_ci +# Name for the column in select2 is set with this fix. +# Without this fix, name would not have set for the +# columns in select2. +SHOW CREATE VIEW v2; +View Create View character_set_client collation_connection +v2 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v2` AS (select 'BUG#14117018' AS `col1`) union all (select '' AS `Name_exp_1`) latin1 latin1_swedish_ci +# Name for the field item in select2 & select3 is set with this fix. +# Without this fix, name would not have set for the +# columns in select2 & select3. 
+SHOW CREATE VIEW v3; +View Create View character_set_client collation_connection +v3 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v3` AS (select 'BUG#14117018' AS `col1`) union all (select '' AS `Name_exp_1`) union all (select '' AS `Name_exp_1`) latin1 latin1_swedish_ci +# Name for the field item in select3 is set with this fix. +# Without this fix, name would not have set for the +# columns in select3. +SHOW CREATE VIEW v4; +View Create View character_set_client collation_connection +v4 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v4` AS (select 'BUG#14117018' AS `col1`) union all (select '' AS `col2`) union all (select '' AS `Name_exp_1`) latin1 latin1_swedish_ci +DROP VIEW v1, v2, v3, v4, v5; +# # lp:833600 Wrong result with view + outer join + uncorrelated subquery (non-semijoin) # CREATE TABLE t1 ( a int, b int ); @@ -5300,6 +5339,61 @@ NULL 8 drop view v1; drop table t1,t2,t3; SET optimizer_switch=@save_optimizer_switch_MDEV_3874; +CREATE TABLE `t1` ( +`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT, +`f0` int(11) unsigned NOT NULL DEFAULT '0', +`f1` int(11) unsigned NOT NULL DEFAULT '0', +PRIMARY KEY (`id`), +UNIQUE KEY `id` (`id`) +); +CREATE TABLE `t2` ( +`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT, +`f02` bigint(20) unsigned NOT NULL DEFAULT '0', +`f03` int(11) unsigned NOT NULL DEFAULT '0', +PRIMARY KEY (`id`), +UNIQUE KEY `id` (`id`) +); +CREATE ALGORITHM=UNDEFINED SQL SECURITY DEFINER VIEW `v1` AS +SELECT +`t1`.`f0` AS `f0`, +`t1`.`f1` AS `f1`, +`t2`.`f02` AS `f02`, +`t2`.`f03` AS `f03` +FROM +(`t1` LEFT JOIN `t2` ON((`t1`.`id` = `t2`.`f02`))); +CREATE FUNCTION `f1`( +p0 BIGINT(20) UNSIGNED +) +RETURNS bigint(20) unsigned +DETERMINISTIC +CONTAINS SQL +SQL SECURITY DEFINER +COMMENT '' +BEGIN +DECLARE k0 INTEGER UNSIGNED DEFAULT 0; +DECLARE lResult INTEGER UNSIGNED DEFAULT 0; +SET k0 = 0; +WHILE k0 < 1 DO +SELECT COUNT(*) as `f00` INTO lResult FROM `v1` WHERE `v1`.`f0` = p0; 
-- BUG +SET k0 = k0 + 1; +END WHILE; +RETURN(k0); +END| +SELECT `f1`(1); +`f1`(1) +1 +SELECT `f1`(1); +`f1`(1) +1 +SELECT `f1`(1); +`f1`(1) +1 +SELECT `f1`(1); +`f1`(1) +1 +DROP FUNCTION f1; +DROP VIEW v1; +DROP TABLE t1, t2; # ----------------------------------------------------------------- # -- End of 5.5 tests. # ----------------------------------------------------------------- diff --git a/mysql-test/std_data/checkDBI_DBD-mysql.pl b/mysql-test/std_data/checkDBI_DBD-mysql.pl index d62d2f8bfc0..d62d2f8bfc0 100644..100755 --- a/mysql-test/std_data/checkDBI_DBD-mysql.pl +++ b/mysql-test/std_data/checkDBI_DBD-mysql.pl diff --git a/mysql-test/std_data/mdev6020-mysql-bin.000001 b/mysql-test/std_data/mdev6020-mysql-bin.000001 Binary files differnew file mode 100644 index 00000000000..49853674e9f --- /dev/null +++ b/mysql-test/std_data/mdev6020-mysql-bin.000001 diff --git a/mysql-test/std_data/new-format-relay-log-win.info b/mysql-test/std_data/new-format-relay-log-win.info new file mode 100644 index 00000000000..e00383b5565 --- /dev/null +++ b/mysql-test/std_data/new-format-relay-log-win.info @@ -0,0 +1,6 @@ +5 +.\slave-relay-bin.000001 +4 + +0 +0 diff --git a/mysql-test/std_data/new-format-relay-log.info b/mysql-test/std_data/new-format-relay-log.info new file mode 100644 index 00000000000..883dec1f66b --- /dev/null +++ b/mysql-test/std_data/new-format-relay-log.info @@ -0,0 +1,6 @@ +5 +./slave-relay-bin.000001 +4 + +0 +0 diff --git a/mysql-test/std_data/old-format-relay-log-win.info b/mysql-test/std_data/old-format-relay-log-win.info new file mode 100644 index 00000000000..7673de6b956 --- /dev/null +++ b/mysql-test/std_data/old-format-relay-log-win.info @@ -0,0 +1,4 @@ +.\slave-relay-bin.000001 +4 + +0 diff --git a/mysql-test/std_data/old-format-relay-log.info b/mysql-test/std_data/old-format-relay-log.info new file mode 100644 index 00000000000..6043b4058f6 --- /dev/null +++ b/mysql-test/std_data/old-format-relay-log.info @@ -0,0 +1,4 @@ +./slave-relay-bin.000001 
+4 + +0 diff --git a/mysql-test/suite/archive/partition_archive.result b/mysql-test/suite/archive/partition_archive.result index bb3e531a2ed..eb1fca46522 100644 --- a/mysql-test/suite/archive/partition_archive.result +++ b/mysql-test/suite/archive/partition_archive.result @@ -127,3 +127,29 @@ select count(*) from t1; count(*) 100 drop table t1; +# +#BUG 18618561: FAILED ALTER TABLE ENGINE CHANGE WITH PARTITIONS +# CORRUPTS FRM +CREATE TABLE t1 (fld1 INT PRIMARY KEY) ENGINE= MYISAM PARTITION BY HASH(fld1) +PARTITIONS 5; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `fld1` int(11) NOT NULL, + PRIMARY KEY (`fld1`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +/*!50100 PARTITION BY HASH (fld1) +PARTITIONS 5 */ +ALTER TABLE t1 ENGINE= ARCHIVE; +ERROR HY000: Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options") +#After the patch, the ENGINE is correctly displayed as MyISAM +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `fld1` int(11) NOT NULL, + PRIMARY KEY (`fld1`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +/*!50100 PARTITION BY HASH (fld1) +PARTITIONS 5 */ +#Cleanup. +DROP TABLE t1; diff --git a/mysql-test/suite/archive/partition_archive.test b/mysql-test/suite/archive/partition_archive.test index be2abeada73..899f266c09c 100644 --- a/mysql-test/suite/archive/partition_archive.test +++ b/mysql-test/suite/archive/partition_archive.test @@ -129,3 +129,21 @@ show create table t1; select count(*) from t1; drop table t1; + +--echo # +--echo #BUG 18618561: FAILED ALTER TABLE ENGINE CHANGE WITH PARTITIONS +--echo # CORRUPTS FRM + +CREATE TABLE t1 (fld1 INT PRIMARY KEY) ENGINE= MYISAM PARTITION BY HASH(fld1) +PARTITIONS 5; +SHOW CREATE TABLE t1; + +--replace_regex /#sql-[0-9a-f_]*/#sql-temporary/ +--error ER_CANT_CREATE_TABLE +ALTER TABLE t1 ENGINE= ARCHIVE; + +--echo #After the patch, the ENGINE is correctly displayed as MyISAM +SHOW CREATE TABLE t1; + +--echo #Cleanup. 
+DROP TABLE t1; diff --git a/mysql-test/suite/binlog/t/binlog_killed.test b/mysql-test/suite/binlog/t/binlog_killed.test index 0a2ba084d78..73759ee5aa5 100644 --- a/mysql-test/suite/binlog/t/binlog_killed.test +++ b/mysql-test/suite/binlog/t/binlog_killed.test @@ -353,6 +353,10 @@ drop function bug27563; # common cleanup # +connection default; +disconnect con1; +disconnect con2; + drop table t1,t2,t3; --echo end of the tests diff --git a/mysql-test/suite/engines/iuds/r/insert_time.result b/mysql-test/suite/engines/iuds/r/insert_time.result index dceba37ae8e..6680886aad1 100644 --- a/mysql-test/suite/engines/iuds/r/insert_time.result +++ b/mysql-test/suite/engines/iuds/r/insert_time.result @@ -5035,9 +5035,9 @@ CAST(0.2359591234567e6 AS TIME) 23:59:59 SELECT CAST(0.2359591234567e+30 AS TIME); CAST(0.2359591234567e+30 AS TIME) -NULL +838:59:59 Warnings: -Warning 1292 Incorrect datetime value: '2.359591234567e29' +Warning 1292 Truncated incorrect time value: '2.359591234567e29' select cast('100:55:50' as time) < cast('24:00:00' as time); cast('100:55:50' as time) < cast('24:00:00' as time) 0 diff --git a/mysql-test/suite/engines/iuds/suite.opt b/mysql-test/suite/engines/iuds/suite.opt new file mode 100644 index 00000000000..e5648163418 --- /dev/null +++ b/mysql-test/suite/engines/iuds/suite.opt @@ -0,0 +1,2 @@ +--timezone=GMT-3 + diff --git a/mysql-test/suite/innodb/include/innodb_simulate_comp_failures.inc b/mysql-test/suite/innodb/include/innodb_simulate_comp_failures.inc new file mode 100644 index 00000000000..47494d60375 --- /dev/null +++ b/mysql-test/suite/innodb/include/innodb_simulate_comp_failures.inc @@ -0,0 +1,147 @@ +--echo # +--echo # Testing robustness against random compression failures +--echo # + +--source include/not_embedded.inc +--source include/have_innodb.inc + +--disable_query_log +# record the file format in order to restore in the end. 
+--let $file_format_save = `SELECT @@innodb_file_format` +--let $file_format_max_save = `SELECT @@innodb_file_format_max` +--let $simulate_comp_failures_save = `SELECT @@innodb_simulate_comp_failures` + +--disable_warnings +DROP TABLE IF EXISTS t1; +SET GLOBAL INNODB_FILE_FORMAT='Barracuda'; +--enable_warnings + +# since this test generates lot of errors in log, suppress checking errors +call mtr.add_suppression(".*"); +--enable_query_log + +# create the table with compressed pages of size 8K. +CREATE TABLE t1(id INT AUTO_INCREMENT PRIMARY KEY, msg VARCHAR(255), KEY msg_i(msg)) ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; + +# percentage of compressions that will be forced to fail +SET GLOBAL innodb_simulate_comp_failures = 25; + +--disable_query_log +--disable_result_log + +let $num_inserts_ind = $num_inserts; +while ($num_inserts_ind) +{ + let $repeat = `select floor(rand() * 10)`; + eval +INSERT INTO t1(id, msg) +VALUES ($num_inserts_ind, REPEAT('abcdefghijklmnopqrstuvwxyz', $repeat)); + dec $num_inserts_ind; +} + +--enable_query_log +--enable_result_log + +SELECT COUNT(*) FROM t1; + +--disable_query_log +--disable_result_log + +# do random ops, making sure that some pages will get fragmented and reorganized. 
+let $num_ops_ind = $num_ops; + +while($num_ops_ind) +{ + let $idx = `select floor(rand()*$num_inserts)`; + let $insert_or_update = `select floor(rand()*3)`; + + let $repeat = `select floor(rand() * 9) + 1`; + + let $msg = query_get_value(`select repeat('abcdefghijklmnopqrstuvwxyz', $repeat) as x`, x, 1); + + let $single_or_multi = `select floor(rand()*10)`; + + if ($insert_or_update) + { + let $cnt = query_get_value(SELECT COUNT(*) cnt FROM t1 WHERE id=$idx, cnt, 1); + + if ($cnt) + { + let $update = `select floor(rand()*2)`; + + if ($update) + { + if ($single_or_multi) + { + eval UPDATE t1 SET msg=\"$msg\" WHERE id=$idx; + } + + if (!$single_or_multi) + { + eval UPDATE t1 SET msg=\"$msg\" WHERE id >= $idx - 100 AND id <= $idx + 100; + } + + } + + if (!$update) + { + if ($single_or_multi) + { + eval INSERT INTO t1(msg, id) VALUES (\"$msg\", $idx) ON DUPLICATE KEY UPDATE msg=VALUES(msg), id = VALUES(id); + } + + if (!$single_or_multi) + { + let $diff = 200; + + while ($diff) + { + eval INSERT INTO t1(msg, id) VALUES (\"$msg\", $idx + 100 - $diff) ON DUPLICATE KEY UPDATE msg=VALUES(msg), id=VALUES(id); + + dec $diff; + } + } + } + } + + if (!$cnt) + { + let $null_msg = `select floor(rand()*2)`; + + if ($null_msg) + { + eval INSERT INTO t1(id,msg) VALUES ($idx, NULL); + } + + if (!$null_msg) + { + eval INSERT INTO t1(id, msg) VALUES ($idx, \"$msg\"); + } + } + } + + if (!$insert_or_update) + { + if ($single_or_multi) + { + eval DELETE from t1 WHERE id=$idx; + } + + if (!$single_or_multi) + { + eval DELETE from t1 WHERE id >= $idx - 100 AND id <= $idx + 100; + } + } + + dec $num_ops_ind; +} + +# final cleanup +DROP TABLE t1; + +# restore innodb_file_format and innodb_file_format_max +eval SET GLOBAL innodb_file_format = \"$file_format_save\"; +eval SET GLOBAL innodb_file_format_max = \"$file_format_max_save\"; +eval SET GLOBAL innodb_simulate_comp_failures = $simulate_comp_failures_save; + +--enable_query_log diff --git 
a/mysql-test/suite/innodb/r/blob_unique2pk.result b/mysql-test/suite/innodb/r/blob_unique2pk.result new file mode 100644 index 00000000000..57953dc8624 --- /dev/null +++ b/mysql-test/suite/innodb/r/blob_unique2pk.result @@ -0,0 +1,15 @@ +create table t1 (f1 tinyblob not null) engine=innodb; +alter table t1 add unique index (f1(255)); +drop table t1; +create table t1 (f1 tinyblob not null) engine=innodb; +alter table t1 add unique index (f1(356)); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` tinyblob NOT NULL, + UNIQUE KEY `f1` (`f1`(255)) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +drop table t1; +create table t1 (f1 point not null) engine=innodb; +alter table t1 add unique index (f1); +drop table t1; diff --git a/mysql-test/suite/innodb/r/innodb-alter-table-disk-full.result b/mysql-test/suite/innodb/r/innodb-alter-table-disk-full.result new file mode 100644 index 00000000000..ffeacae7951 --- /dev/null +++ b/mysql-test/suite/innodb/r/innodb-alter-table-disk-full.result @@ -0,0 +1,50 @@ +create table t1(a int not null primary key, b int) engine=innodb; +create procedure innodb_insert_proc (repeat_count int) +begin +declare current_num int; +set current_num = 0; +while current_num < repeat_count do +insert into t1 values(current_num, current_num); +set current_num = current_num + 1; +end while; +end// +commit; +set autocommit=0; +call innodb_insert_proc(10000); +commit; +set autocommit=1; +set DEBUG_DBUG='+d,ib_os_aio_func_io_failure_28'; +alter table t1 add testcol int; +ERROR HY000: The table 't1' is full +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +set DEBUG_DBUG='+d,ib_os_aio_func_io_failure_28_2'; +alter table t1 add testcol int; +ERROR HY000: The table 't1' is full +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`a`) 
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 +set DEBUG_DBUG=NULL; +alter table t1 add testcol2 int; +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` int(11) DEFAULT NULL, + `testcol2` int(11) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +select count(1) from t1; +count(1) +10000 +drop procedure innodb_insert_proc; +drop table t1; diff --git a/mysql-test/suite/innodb/r/innodb-fk.result b/mysql-test/suite/innodb/r/innodb-fk.result new file mode 100644 index 00000000000..cf883d83874 --- /dev/null +++ b/mysql-test/suite/innodb/r/innodb-fk.result @@ -0,0 +1,35 @@ +# +# Bug #18806829 OPENING INNODB TABLES WITH MANY FOREIGN KEY +# REFERENCES IS SLOW/CRASHES SEMAPHORE +# +create table t1 (f1 int primary key) engine=innodb; +insert into t1 values (5); +insert into t1 values (2882); +insert into t1 values (10); +update t1 set f1 = 28 where f1 = 2882; +select * from fk_120; +f1 +5 +10 +28 +select * from fk_1; +f1 +5 +10 +28 +select * from fk_50; +f1 +5 +10 +28 +drop table t1; +# +# Check if restrict is working fine. 
+# +create table t1 (f1 int primary key) engine=innodb; +delete from t1 where f1 = 29; +ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test`.`fk_29`, CONSTRAINT `pc29` FOREIGN KEY (`f1`) REFERENCES `t1` (`f1`)) +select * from fk_29; +f1 +29 +drop table t1; diff --git a/mysql-test/suite/innodb/r/innodb-stats-initialize-failure.result b/mysql-test/suite/innodb/r/innodb-stats-initialize-failure.result new file mode 100644 index 00000000000..ef2d3182b92 --- /dev/null +++ b/mysql-test/suite/innodb/r/innodb-stats-initialize-failure.result @@ -0,0 +1,32 @@ +call mtr.add_suppression("InnoDB: Warning: Index.*"); +set DEBUG_DBUG='+d,ib_ha_innodb_stat_not_initialized'; +create table t1(a int not null primary key, b int, c int, key(b), key(c)) engine=innodb; +create procedure innodb_insert_proc (repeat_count int) +begin +declare current_num int; +set current_num = 0; +while current_num < repeat_count do +insert into t1 values(current_num, current_num, current_num); +set current_num = current_num + 1; +end while; +end// +commit; +set autocommit=0; +call innodb_insert_proc(10000); +commit; +set autocommit=1; +select count(1) from t1; +count(1) +10000 +select count(1) from t1 where a between 5 and 100; +count(1) +96 +select count(1) from t1 where b between 5 and 256; +count(1) +252 +select count(1) from t1 where c between 7 and 787; +count(1) +781 +set DEBUG_DBUG=NULL; +drop procedure innodb_insert_proc; +drop table t1; diff --git a/mysql-test/suite/innodb/r/innodb_simulate_comp_failures.result b/mysql-test/suite/innodb/r/innodb_simulate_comp_failures.result new file mode 100644 index 00000000000..cb7a3b9c282 --- /dev/null +++ b/mysql-test/suite/innodb/r/innodb_simulate_comp_failures.result @@ -0,0 +1,8 @@ +# +# Testing robustness against random compression failures +# +CREATE TABLE t1(id INT AUTO_INCREMENT PRIMARY KEY, msg VARCHAR(255), KEY msg_i(msg)) ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; +SET GLOBAL 
innodb_simulate_comp_failures = 25; +SELECT COUNT(*) FROM t1; +COUNT(*) +100000 diff --git a/mysql-test/suite/innodb/r/innodb_simulate_comp_failures_small.result b/mysql-test/suite/innodb/r/innodb_simulate_comp_failures_small.result new file mode 100644 index 00000000000..1cb2cf77050 --- /dev/null +++ b/mysql-test/suite/innodb/r/innodb_simulate_comp_failures_small.result @@ -0,0 +1,8 @@ +# +# Testing robustness against random compression failures +# +CREATE TABLE t1(id INT AUTO_INCREMENT PRIMARY KEY, msg VARCHAR(255), KEY msg_i(msg)) ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; +SET GLOBAL innodb_simulate_comp_failures = 25; +SELECT COUNT(*) FROM t1; +COUNT(*) +10000 diff --git a/mysql-test/suite/innodb/t/blob_unique2pk.test b/mysql-test/suite/innodb/t/blob_unique2pk.test new file mode 100644 index 00000000000..ff6720690dd --- /dev/null +++ b/mysql-test/suite/innodb/t/blob_unique2pk.test @@ -0,0 +1,20 @@ +--source include/have_innodb.inc + + +# +# Bug#16368875 INNODB: FAILING ASSERTION: PRIMARY_KEY_NO == -1 || PRIMARY_KEY_NO == 0 +# +create table t1 (f1 tinyblob not null) engine=innodb; +alter table t1 add unique index (f1(255)); +drop table t1; + +create table t1 (f1 tinyblob not null) engine=innodb; +alter table t1 add unique index (f1(356)); +show create table t1; +drop table t1; + +create table t1 (f1 point not null) engine=innodb; +alter table t1 add unique index (f1); +drop table t1; + + diff --git a/mysql-test/suite/innodb/t/innodb-alter-table-disk-full-master.opt b/mysql-test/suite/innodb/t/innodb-alter-table-disk-full-master.opt new file mode 100644 index 00000000000..9c2ee7846b6 --- /dev/null +++ b/mysql-test/suite/innodb/t/innodb-alter-table-disk-full-master.opt @@ -0,0 +1 @@ +--innodb-use-native-aio=0 diff --git a/mysql-test/suite/innodb/t/innodb-alter-table-disk-full.test b/mysql-test/suite/innodb/t/innodb-alter-table-disk-full.test new file mode 100644 index 00000000000..4e3a7bfdae6 --- /dev/null +++ 
b/mysql-test/suite/innodb/t/innodb-alter-table-disk-full.test @@ -0,0 +1,47 @@ +# MDEV-6288: Innodb causes server crash after disk full, then can't ALTER TABLE any more +--source include/have_innodb.inc + +# DEBUG_SYNC must be compiled in. +--source include/have_debug_sync.inc + +create table t1(a int not null primary key, b int) engine=innodb; + +delimiter //; +create procedure innodb_insert_proc (repeat_count int) +begin + declare current_num int; + set current_num = 0; + while current_num < repeat_count do + insert into t1 values(current_num, current_num); + set current_num = current_num + 1; + end while; +end// +delimiter ;// +commit; + +set autocommit=0; +call innodb_insert_proc(10000); +commit; +set autocommit=1; + +# This caused crash earlier +set DEBUG_DBUG='+d,ib_os_aio_func_io_failure_28'; +--error 1114 +alter table t1 add testcol int; +show create table t1; + +# This caused crash earlier +set DEBUG_DBUG='+d,ib_os_aio_func_io_failure_28_2'; +--error 1114 +alter table t1 add testcol int; +show create table t1; + +set DEBUG_DBUG=NULL; +alter table t1 add testcol2 int; +show create table t1; + +select count(1) from t1; + +drop procedure innodb_insert_proc; +drop table t1; + diff --git a/mysql-test/suite/innodb/t/innodb-fk.test b/mysql-test/suite/innodb/t/innodb-fk.test new file mode 100644 index 00000000000..9839cd2d084 --- /dev/null +++ b/mysql-test/suite/innodb/t/innodb-fk.test @@ -0,0 +1,86 @@ +--source include/have_innodb.inc +--source include/not_embedded.inc + +--echo # +--echo # Bug #18806829 OPENING INNODB TABLES WITH MANY FOREIGN KEY +--echo # REFERENCES IS SLOW/CRASHES SEMAPHORE +--echo # + +create table t1 (f1 int primary key) engine=innodb; +insert into t1 values (5); +insert into t1 values (2882); +insert into t1 values (10); + +let $fk_tables = 120; + +--disable_query_log +let $i = $fk_tables; +while ($i) +{ + eval create table fk_$i (f1 int primary key, + constraint pc$i foreign key (f1) references t1(f1) + on delete cascade on update cascade) 
engine=innodb; + eval insert into fk_$i values (5); + eval insert into fk_$i values (2882); + eval insert into fk_$i values (10); + dec $i; +} +--enable_query_log + +--source include/restart_mysqld.inc + +update t1 set f1 = 28 where f1 = 2882; + +select * from fk_120; +select * from fk_1; +select * from fk_50; + +--disable_query_log +let $i = $fk_tables; +while ($i) +{ + eval drop table fk_$i; + dec $i; +} +--enable_query_log + +drop table t1; + +--echo # +--echo # Check if restrict is working fine. +--echo # + +create table t1 (f1 int primary key) engine=innodb; + +let $fk_tables = 30; + +--disable_query_log +let $i = $fk_tables; +while ($i) +{ + eval create table fk_$i (f1 int primary key, + constraint pc$i foreign key (f1) references t1(f1) + on delete restrict on update restrict) engine=innodb; + eval insert into t1 values ($i); + eval insert into fk_$i values ($i); + dec $i; +} +--enable_query_log + +--source include/restart_mysqld.inc + +--error ER_ROW_IS_REFERENCED_2 +delete from t1 where f1 = 29; +select * from fk_29; + +--disable_query_log +let $i = $fk_tables; +while ($i) +{ + eval drop table fk_$i; + dec $i; +} +--enable_query_log + +drop table t1; + diff --git a/mysql-test/suite/innodb/t/innodb-stats-initialize-failure.test b/mysql-test/suite/innodb/t/innodb-stats-initialize-failure.test new file mode 100644 index 00000000000..e480f0caf07 --- /dev/null +++ b/mysql-test/suite/innodb/t/innodb-stats-initialize-failure.test @@ -0,0 +1,39 @@ +# MDEV-6424: Mariadb server crashes with assertion failure in file ha_innodb.cc +--source include/have_innodb.inc + +# DEBUG_SYNC must be compiled in. 
+--source include/have_debug_sync.inc + +call mtr.add_suppression("InnoDB: Warning: Index.*"); +# This caused crash earlier +set DEBUG_DBUG='+d,ib_ha_innodb_stat_not_initialized'; +create table t1(a int not null primary key, b int, c int, key(b), key(c)) engine=innodb; + +delimiter //; +create procedure innodb_insert_proc (repeat_count int) +begin + declare current_num int; + set current_num = 0; + while current_num < repeat_count do + insert into t1 values(current_num, current_num, current_num); + set current_num = current_num + 1; + end while; +end// +delimiter ;// +commit; + +set autocommit=0; +call innodb_insert_proc(10000); +commit; +set autocommit=1; + +select count(1) from t1; +select count(1) from t1 where a between 5 and 100; +select count(1) from t1 where b between 5 and 256; +select count(1) from t1 where c between 7 and 787; + +set DEBUG_DBUG=NULL; + +drop procedure innodb_insert_proc; +drop table t1; + diff --git a/mysql-test/suite/innodb/t/innodb_simulate_comp_failures-master.opt b/mysql-test/suite/innodb/t/innodb_simulate_comp_failures-master.opt new file mode 100644 index 00000000000..fae32059249 --- /dev/null +++ b/mysql-test/suite/innodb/t/innodb_simulate_comp_failures-master.opt @@ -0,0 +1,2 @@ +--innodb-file-per-table + diff --git a/mysql-test/suite/innodb/t/innodb_simulate_comp_failures.test b/mysql-test/suite/innodb/t/innodb_simulate_comp_failures.test new file mode 100644 index 00000000000..a940a926f85 --- /dev/null +++ b/mysql-test/suite/innodb/t/innodb_simulate_comp_failures.test @@ -0,0 +1,8 @@ +--source include/big_test.inc +# test takes too long with valgrind +--source include/not_valgrind.inc +--let $num_inserts = 100000 +--let $num_ops = 30000 +--source suite/innodb/include/innodb_simulate_comp_failures.inc +# clean exit +--exit diff --git a/mysql-test/suite/innodb/t/innodb_simulate_comp_failures_small-master.opt b/mysql-test/suite/innodb/t/innodb_simulate_comp_failures_small-master.opt new file mode 100644 index 
00000000000..fae32059249 --- /dev/null +++ b/mysql-test/suite/innodb/t/innodb_simulate_comp_failures_small-master.opt @@ -0,0 +1,2 @@ +--innodb-file-per-table + diff --git a/mysql-test/suite/innodb/t/innodb_simulate_comp_failures_small.test b/mysql-test/suite/innodb/t/innodb_simulate_comp_failures_small.test new file mode 100644 index 00000000000..7332a8c6ba4 --- /dev/null +++ b/mysql-test/suite/innodb/t/innodb_simulate_comp_failures_small.test @@ -0,0 +1,5 @@ +--let $num_inserts = 10000 +--let $num_ops = 3000 +--source suite/innodb/include/innodb_simulate_comp_failures.inc +# clean exit +--exit diff --git a/mysql-test/suite/maria/maria3.result b/mysql-test/suite/maria/maria3.result index 021cc8fc357..74eed530bd9 100644 --- a/mysql-test/suite/maria/maria3.result +++ b/mysql-test/suite/maria/maria3.result @@ -314,6 +314,7 @@ aria_max_sort_file_size 9223372036853727232 aria_pagecache_age_threshold 300 aria_pagecache_buffer_size 8388608 aria_pagecache_division_limit 100 +aria_pagecache_file_hash_size 512 aria_page_checksum OFF aria_recover NORMAL aria_repair_threads 1 diff --git a/mysql-test/suite/maria/maria_partition.result b/mysql-test/suite/maria/maria_partition.result index 372230c0b71..1c4f0fbaf05 100644 --- a/mysql-test/suite/maria/maria_partition.result +++ b/mysql-test/suite/maria/maria_partition.result @@ -33,3 +33,18 @@ insert into t1 values (2); select * from t2 left join t1 on (t2.a=t1.a) where t2.a='bbb'; a a drop table t1,t2; +CREATE TABLE t1 (pk INT PRIMARY KEY) ENGINE=Aria PARTITION BY KEY() PARTITIONS 2; +CREATE VIEW v1 AS SELECT * FROM t1; +LOCK TABLE v1 WRITE; +CREATE TABLE v1 (i INT); +ERROR HY000: Table 'v1' was not locked with LOCK TABLES +INSERT INTO v1 VALUES (1); +UNLOCK TABLES; +check table t1; +Table Op Msg_type Msg_text +test.t1 check status OK +SELECT * FROM t1; +pk +1 +drop table t1; +drop view v1; diff --git a/mysql-test/suite/maria/maria_partition.test b/mysql-test/suite/maria/maria_partition.test index 47571c7a4be..ca2651bcdc3 100644 
--- a/mysql-test/suite/maria/maria_partition.test +++ b/mysql-test/suite/maria/maria_partition.test @@ -49,6 +49,28 @@ insert into t1 values (2); select * from t2 left join t1 on (t2.a=t1.a) where t2.a='bbb'; drop table t1,t2; +# +# MDEV-6493 +# Assertion `table->file->stats.records > 0 || error' +# failure, or 'Invalid write' valgrind warnings, or crash on scenario +# with Aria table, view, LOCK TABLES # +# + +CREATE TABLE t1 (pk INT PRIMARY KEY) ENGINE=Aria PARTITION BY KEY() PARTITIONS 2; +CREATE VIEW v1 AS SELECT * FROM t1; + +LOCK TABLE v1 WRITE; +--error 1100 +CREATE TABLE v1 (i INT); +INSERT INTO v1 VALUES (1); +UNLOCK TABLES; +check table t1; + +SELECT * FROM t1; + +drop table t1; +drop view v1; + # Set defaults back --disable_result_log --disable_query_log diff --git a/mysql-test/suite/plugins/r/false_dupes-6543.result b/mysql-test/suite/plugins/r/false_dupes-6543.result new file mode 100644 index 00000000000..22accaaae8a --- /dev/null +++ b/mysql-test/suite/plugins/r/false_dupes-6543.result @@ -0,0 +1,5 @@ +install soname 'ha_federated'; +install soname 'ha_federated'; +install soname 'ha_federatedx'; +ERROR HY000: Function 'FEDERATED' already exists +uninstall soname 'ha_federated'; diff --git a/mysql-test/suite/plugins/t/false_dupes-6543.test b/mysql-test/suite/plugins/t/false_dupes-6543.test new file mode 100644 index 00000000000..ebdbe00e47c --- /dev/null +++ b/mysql-test/suite/plugins/t/false_dupes-6543.test @@ -0,0 +1,18 @@ +# +# MDEV-6543 Crash if enable 'federatedx' when 'federated' plugin already enabled, and vice-versa +# +if(!$HA_FEDERATED_SO) { + skip Needs ha_federated.so; +} +if(!$HA_FEDERATEDX_SO) { + skip Needs ha_federatedx.so; +} + +install soname 'ha_federated'; +# note: no error below! install soname ignores already loaded plugins +install soname 'ha_federated'; +# note: an error here, even though plugin name is the same! 
+--error ER_UDF_EXISTS +install soname 'ha_federatedx'; +uninstall soname 'ha_federated'; + diff --git a/mysql-test/suite/roles/ip-6401.result b/mysql-test/suite/roles/ip-6401.result new file mode 100644 index 00000000000..a9876eb8273 --- /dev/null +++ b/mysql-test/suite/roles/ip-6401.result @@ -0,0 +1,13 @@ +create role r1; +create user foo@'127.0.0.1'; +grant r1 to foo@'127.0.0.1'; +show grants; +Grants for foo@127.0.0.1 +GRANT r1 TO 'foo'@'127.0.0.1' +GRANT USAGE ON *.* TO 'foo'@'127.0.0.1' +set role r1; +select * from information_schema.enabled_roles; +ROLE_NAME +r1 +drop user foo@'127.0.0.1'; +drop role r1; diff --git a/mysql-test/suite/roles/ip-6401.test b/mysql-test/suite/roles/ip-6401.test new file mode 100644 index 00000000000..34d8553afa3 --- /dev/null +++ b/mysql-test/suite/roles/ip-6401.test @@ -0,0 +1,13 @@ +--source include/not_embedded.inc +create role r1; +create user foo@'127.0.0.1'; +grant r1 to foo@'127.0.0.1'; + +--connect (con1,127.0.0.1,foo,,) +show grants; +set role r1; +select * from information_schema.enabled_roles; + +connection default; +drop user foo@'127.0.0.1'; +drop role r1; diff --git a/mysql-test/suite/rpl/r/failed_create_view-6409.result b/mysql-test/suite/rpl/r/failed_create_view-6409.result new file mode 100644 index 00000000000..6b04f2960d1 --- /dev/null +++ b/mysql-test/suite/rpl/r/failed_create_view-6409.result @@ -0,0 +1,15 @@ +create table v1 (a int); +include/master-slave.inc +[connection master] +create table t1 (a int); +create view v1 as select * from t1; +ERROR 42S01: Table 'v1' already exists +show tables; +Tables_in_test +t1 +v1 +show tables; +Tables_in_test +t1 +drop table if exists t1, v1; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/kill_hard-6290.result b/mysql-test/suite/rpl/r/kill_hard-6290.result new file mode 100644 index 00000000000..27b62416368 --- /dev/null +++ b/mysql-test/suite/rpl/r/kill_hard-6290.result @@ -0,0 +1,4 @@ +include/master-slave.inc +[connection master] +kill user test2@nohost; 
+include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_checksum.result b/mysql-test/suite/rpl/r/rpl_checksum.result index 2229ab63ee4..d88258f3b65 100644 --- a/mysql-test/suite/rpl/r/rpl_checksum.result +++ b/mysql-test/suite/rpl/r/rpl_checksum.result @@ -128,7 +128,33 @@ insert into t3 value (1, @@global.binlog_checksum); drop table t1, t2, t3; set @@global.binlog_checksum = @master_save_binlog_checksum; set @@global.master_verify_checksum = @save_master_verify_checksum; +*** Bug#59123 / MDEV-5799: INCIDENT_EVENT checksum written to error log as garbage characters *** +CREATE TABLE t4 (a INT PRIMARY KEY); +INSERT INTO t4 VALUES (1); +SET sql_log_bin=0; +CALL mtr.add_suppression("\\[ERROR\\] Can't generate a unique log-filename"); +SET sql_log_bin=1; +SET @old_dbug= @@GLOBAL.debug_dbug; +SET debug_dbug= '+d,binlog_inject_new_name_error'; +FLUSH LOGS; +ERROR HY000: Can't generate a unique log-filename master-bin.(1-999) + +SET debug_dbug= @old_dbug; +INSERT INTO t4 VALUES (2); +include/wait_for_slave_sql_error.inc [errno=1590] +Last_SQL_Error = 'The incident LOST_EVENTS occured on the master. 
Message: error writing to the binary log' +SELECT * FROM t4 ORDER BY a; +a +1 +STOP SLAVE IO_THREAD; +SET sql_slave_skip_counter= 1; +include/start_slave.inc +SELECT * FROM t4 ORDER BY a; +a +1 +2 set @@global.binlog_checksum = @slave_save_binlog_checksum; set @@global.slave_sql_verify_checksum = @save_slave_sql_verify_checksum; End of tests +DROP TABLE t4; include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_gtid_basic.result b/mysql-test/suite/rpl/r/rpl_gtid_basic.result index fd33221814d..e8e5bf36f84 100644 --- a/mysql-test/suite/rpl/r/rpl_gtid_basic.result +++ b/mysql-test/suite/rpl/r/rpl_gtid_basic.result @@ -61,13 +61,7 @@ include/stop_slave.inc INSERT INTO t1 VALUES (5, "m1a"); INSERT INTO t2 VALUES (5, "i1a"); CHANGE MASTER TO master_host = '127.0.0.1', master_port = MASTER_PORT, -MASTER_USE_GTID=SLAVE_POS; -SET GLOBAL sql_slave_skip_counter=1; -ERROR HY000: When using GTID, @@sql_slave_skip_counter can not be used. Instead, setting @@gtid_slave_pos explicitly can be used to skip to after a given GTID position. -CHANGE MASTER TO master_host = '127.0.0.1', master_port = MASTER_PORT, MASTER_USE_GTID=CURRENT_POS; -SET GLOBAL sql_slave_skip_counter=10; -ERROR HY000: When using GTID, @@sql_slave_skip_counter can not be used. Instead, setting @@gtid_slave_pos explicitly can be used to skip to after a given GTID position. 
include/start_slave.inc SELECT * FROM t1 ORDER BY a; a b @@ -191,14 +185,8 @@ master-bin.000001 # include/show_binlog_events.inc Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Format_desc # # SERVER_VERSION, BINLOG_VERSION -master-bin.000001 # Gtid_list # # [1-2-20,0-1-10,0-3-30] +master-bin.000001 # Gtid_list # # [#-#-#] master-bin.000001 # Binlog_checkpoint # # master-bin.000001 -SELECT @@GLOBAL.gtid_binlog_pos; -@@GLOBAL.gtid_binlog_pos -1-2-20,0-3-30 -SELECT @@GLOBAL.gtid_binlog_state; -@@GLOBAL.gtid_binlog_state -1-2-20,0-1-10,0-3-30 SET GLOBAL gtid_binlog_state = @old_state; ERROR HY000: This operation is not allowed if any GTID has been logged to the binary log. Run RESET MASTER first to erase the log RESET MASTER; @@ -258,7 +246,7 @@ a include/stop_slave.inc SET gtid_domain_id= 1; INSERT INTO t1 VALUES (3); -SET @pos= '1-1-1,0-1-110'; +SET @pos= 'POS'; SELECT master_gtid_wait(@pos, 0); master_gtid_wait(@pos, 0) -1 @@ -322,5 +310,108 @@ master_gtid_wait('2-1-10') 0 master_gtid_wait('2-1-10') 0 +*** Test sql_gtid_slave_pos when used with GTID *** +include/stop_slave.inc +SET gtid_domain_id=2; +SET gtid_seq_no=1000; +INSERT INTO t1 VALUES (10); +INSERT INTO t1 VALUES (11); +SET sql_slave_skip_counter= 1; +include/start_slave.inc +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +a +11 +SELECT IF(LOCATE("2-1-1001", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1001 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; +status +Ok +include/stop_slave.inc +SET gtid_domain_id=2; +SET gtid_seq_no=1010; +INSERT INTO t1 VALUES (12); +INSERT INTO t1 VALUES (13); +SET sql_slave_skip_counter= 2; +include/start_slave.inc +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +a +11 +13 +SELECT IF(LOCATE("2-1-1011", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! 
expected GTID 2-1-1011 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; +status +Ok +include/stop_slave.inc +SET gtid_domain_id=2; +SET gtid_seq_no=1020; +INSERT INTO t1 VALUES (14); +INSERT INTO t1 VALUES (15); +INSERT INTO t1 VALUES (16); +SET sql_slave_skip_counter= 3; +include/start_slave.inc +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +a +11 +13 +15 +16 +SELECT IF(LOCATE("2-1-1022", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1022 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; +status +Ok +include/stop_slave.inc +SET gtid_domain_id=2; +SET gtid_seq_no=1030; +INSERT INTO t1 VALUES (17); +INSERT INTO t1 VALUES (18); +INSERT INTO t1 VALUES (19); +SET sql_slave_skip_counter= 5; +include/start_slave.inc +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +a +11 +13 +15 +16 +19 +SELECT IF(LOCATE("2-1-1032", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1032 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; +status +Ok +include/stop_slave.inc +SET gtid_domain_id=3; +SET gtid_seq_no=100; +CREATE TABLE t2 (a INT PRIMARY KEY); +DROP TABLE t2; +SET gtid_domain_id=2; +SET gtid_seq_no=1040; +INSERT INTO t1 VALUES (20); +SET @saved_mode= @@GLOBAL.slave_ddl_exec_mode; +SET GLOBAL slave_ddl_exec_mode=STRICT; +SET sql_slave_skip_counter=1; +START SLAVE UNTIL master_gtid_pos="3-1-100"; +include/sync_with_master_gtid.inc +include/wait_for_slave_sql_to_stop.inc +SELECT * FROM t2; +ERROR 42S02: Table 'test.t2' doesn't exist +SELECT IF(LOCATE("3-1-100", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-100 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; +status +Ok +SET sql_log_bin=0; +CALL mtr.add_suppression("Slave: Unknown table 'test\\.t2' Error_code: 1051"); +SET sql_log_bin=1; +START SLAVE; +include/wait_for_slave_sql_error.inc [errno=1051] +SELECT IF(LOCATE("3-1-100", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! 
expected GTID 3-1-100 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; +status +Ok +STOP SLAVE IO_THREAD; +SET sql_slave_skip_counter=2; +include/start_slave.inc +SELECT * FROM t1 WHERE a >= 20 ORDER BY a; +a +20 +SELECT IF(LOCATE("3-1-101", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-101 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; +status +Ok +SELECT IF(LOCATE("2-1-1040", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1040 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; +status +Ok +SET GLOBAL slave_ddl_exec_mode= @saved_mode; DROP TABLE t1; include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_gtid_crash.result b/mysql-test/suite/rpl/r/rpl_gtid_crash.result index fdbd1cc6898..45482e3b03e 100644 --- a/mysql-test/suite/rpl/r/rpl_gtid_crash.result +++ b/mysql-test/suite/rpl/r/rpl_gtid_crash.result @@ -46,7 +46,7 @@ master-bin.000002 # master-bin.000003 # SHOW BINLOG EVENTS IN 'master-bin.000003' LIMIT 1,1; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000003 # Gtid_list # # [1-1-2,2-1-1,0-1-1] +master-bin.000003 # Gtid_list # # # SET SESSION debug_dbug="+d,crash_dispatch_command_before"; SELECT 1; Got one of the listed errors @@ -58,7 +58,7 @@ master-bin.000003 # master-bin.000004 # SHOW BINLOG EVENTS IN 'master-bin.000004' LIMIT 1,1; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000004 # Gtid_list # # [1-1-2,0-1-1,2-1-1] +master-bin.000004 # Gtid_list # # # SELECT * FROM t1 ORDER BY a; a 1 diff --git a/mysql-test/suite/rpl/r/rpl_gtid_errorlog.result b/mysql-test/suite/rpl/r/rpl_gtid_errorlog.result new file mode 100644 index 00000000000..204615201d9 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_gtid_errorlog.result @@ -0,0 +1,42 @@ +include/master-slave.inc +[connection master] +*** Test MDEV-6120, output of current GTID when a replication error is logged to the errorlog *** +CREATE TABLE t1(a INT PRIMARY KEY); 
+include/stop_slave.inc +CHANGE MASTER TO master_use_gtid=slave_pos; +INSERT INTO t1 VALUES (1); +SET gtid_seq_no=100; +INSERT INTO t1 VALUES (2); +INSERT INTO t1 VALUES (3); +INSERT INTO t1 VALUES (4); +SET sql_log_bin=0; +INSERT INTO t1 VALUES (2); +SET sql_log_bin=1; +START SLAVE; +include/wait_for_slave_sql_error.inc [errno=1062] +include/stop_slave.inc +SET GLOBAL gtid_slave_pos= "0-1-100"; +include/start_slave.inc +SELECT * FROM t1 ORDER BY a; +a +1 +2 +3 +4 +SET @dbug_save= @@debug_dbug; +SET debug_dbug= '+d,incident_database_resync_on_replace'; +REPLACE INTO t1 VALUES (5); +SET debug_dbug= @dbug_save; +include/wait_for_slave_sql_error.inc [errno=1590] +include/stop_slave.inc +SET sql_slave_skip_counter=1; +include/start_slave.inc +SELECT * FROM t1 ORDER BY a; +a +1 +2 +3 +4 +5 +DROP TABLE t1; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_gtid_master_promote.result b/mysql-test/suite/rpl/r/rpl_gtid_master_promote.result index 132c01f5f55..599c35164dd 100644 --- a/mysql-test/suite/rpl/r/rpl_gtid_master_promote.result +++ b/mysql-test/suite/rpl/r/rpl_gtid_master_promote.result @@ -119,7 +119,7 @@ a b 2 3 *** Now replicate all extra changes from 3,4,5 to 2, in preparation for making 2 the new master. 
*** CHANGE MASTER TO master_host = '127.0.0.1', master_port = SERVER_MYPORT_3; -START SLAVE UNTIL master_gtid_pos = "1-1-1,0-1-3,3-1-7,2-1-4"; +START SLAVE UNTIL master_gtid_pos = "SERVER3_POS"; include/wait_for_slave_to_stop.inc SELECT * FROM t1 ORDER BY a; a @@ -142,7 +142,7 @@ a b 3 1 3 3 CHANGE MASTER TO master_host = '127.0.0.1', master_port = SERVER_MYPORT_4; -START SLAVE UNTIL master_gtid_pos = "1-1-7,0-1-3,3-1-4,2-1-1"; +START SLAVE UNTIL master_gtid_pos = "SERVER4_POS"; include/wait_for_slave_to_stop.inc SELECT * FROM t1 ORDER BY a; a @@ -168,7 +168,7 @@ a b 3 1 3 3 CHANGE MASTER TO master_host = '127.0.0.1', master_port = SERVER_MYPORT_5; -START SLAVE UNTIL master_gtid_pos = "1-1-4,0-1-3,3-1-1,2-1-7"; +START SLAVE UNTIL master_gtid_pos = "SERVER5_POS"; include/wait_for_slave_to_stop.inc SELECT * FROM t1 ORDER BY a; a diff --git a/mysql-test/suite/rpl/r/rpl_gtid_stop_start.result b/mysql-test/suite/rpl/r/rpl_gtid_stop_start.result index ddcbaf8dffd..60c8e4666b9 100644 --- a/mysql-test/suite/rpl/r/rpl_gtid_stop_start.result +++ b/mysql-test/suite/rpl/r/rpl_gtid_stop_start.result @@ -34,10 +34,10 @@ master-bin.000003 # Gtid_list # # [0-1-3] FLUSH LOGS; SHOW BINLOG EVENTS IN 'master-bin.000004' LIMIT 1,1; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000004 # Gtid_list # # [1-1-1,0-1-4] +master-bin.000004 # Gtid_list # # # SHOW BINLOG EVENTS IN 'master-bin.000005' LIMIT 1,1; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000005 # Gtid_list # # [1-1-1,0-1-4] +master-bin.000005 # Gtid_list # # # show binary logs; Log_name File_size master-bin.000002 # diff --git a/mysql-test/suite/rpl/r/rpl_gtid_until.result b/mysql-test/suite/rpl/r/rpl_gtid_until.result index 53100af4c71..d86b5cac78c 100644 --- a/mysql-test/suite/rpl/r/rpl_gtid_until.result +++ b/mysql-test/suite/rpl/r/rpl_gtid_until.result @@ -168,7 +168,7 @@ a include/stop_slave.inc CREATE TABLE t3 (a INT); DROP TABLE t3; -START SLAVE UNTIL 
master_gtid_pos='1-1-5,2-1-5,0-1-6'; +START SLAVE UNTIL master_gtid_pos='UNTIL_CONDITION'; include/wait_for_slave_to_stop.inc SHOW CREATE TABLE t3; Table Create Table diff --git a/mysql-test/suite/rpl/r/rpl_heartbeat_debug.result b/mysql-test/suite/rpl/r/rpl_heartbeat_debug.result new file mode 100644 index 00000000000..b9dec686e4a --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_heartbeat_debug.result @@ -0,0 +1,25 @@ +include/master-slave.inc +[connection master] +include/stop_slave.inc +set @restore_slave_net_timeout= @@global.slave_net_timeout; +set @@global.slave_net_timeout= 10; +show status like 'Slave_heartbeat_period';; +Variable_name Slave_heartbeat_period +Value 60.000 +SET @save_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,simulate_slave_heartbeat_network_error"; +CALL mtr.add_suppression('SET @master_heartbeat_period to master failed with error'); +CALL mtr.add_suppression('Master command COM_REGISTER_SLAVE failed: failed registering on master, reconnecting to try again'); +include/start_slave.inc +drop table if exists t1; +CREATE TABLE t1 (a INT PRIMARY KEY); +INSERT INTO t1 VALUES (1); +SELECT * FROM t1; +a +1 +drop table t1; +include/stop_slave.inc +SET GLOBAL debug_dbug=@save_dbug; +set @@global.slave_net_timeout= @restore_slave_net_timeout; +include/start_slave.inc +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_mdev6020.result b/mysql-test/suite/rpl/r/rpl_mdev6020.result new file mode 100644 index 00000000000..0855f578cfc --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_mdev6020.result @@ -0,0 +1,49 @@ +include/master-slave.inc +[connection master] +include/stop_slave.inc +include/rpl_stop_server.inc [server_number=1] +include/rpl_start_server.inc [server_number=1] +SET SQL_LOG_BIN=0; +ALTER TABLE mysql.gtid_slave_pos ENGINE = InnoDB; +SET SQL_LOG_BIN=1; +SET @old_engine= @@GLOBAL.default_storage_engine; +SET GLOBAL default_storage_engine=InnoDB; +SET @old_parallel= @@GLOBAL.slave_parallel_threads; +SET GLOBAL 
slave_parallel_threads=12; +CHANGE MASTER TO master_host='127.0.0.1', master_port=SERVER_MYPORT_1, master_user='root', master_log_file='master-bin.000001', master_log_pos=4; +include/start_slave.inc +SET SQL_LOG_BIN=0; +ALTER TABLE mysql.gtid_slave_pos ENGINE = InnoDB; +SET SQL_LOG_BIN=1; +SELECT @@gtid_slave_pos; +@@gtid_slave_pos +0-1-1381 +CHECKSUM TABLE table0_int_autoinc, table0_key_pk_parts_2_int_autoinc, table100_int_autoinc, table100_key_pk_parts_2_int_autoinc, table10_int_autoinc, table10_key_pk_parts_2_int_autoinc, table1_int_autoinc, table1_key_pk_parts_2_int_autoinc, table2_int_autoinc, table2_key_pk_parts_2_int_autoinc; +Table Checksum +test.table0_int_autoinc 3623174395 +test.table0_key_pk_parts_2_int_autoinc 2888328157 +test.table100_int_autoinc 3624823809 +test.table100_key_pk_parts_2_int_autoinc 3316583308 +test.table10_int_autoinc 1615053718 +test.table10_key_pk_parts_2_int_autoinc 4147461080 +test.table1_int_autoinc 478809705 +test.table1_key_pk_parts_2_int_autoinc 3032208641 +test.table2_int_autoinc 854763867 +test.table2_key_pk_parts_2_int_autoinc 4231615291 +include/stop_slave.inc +SET GLOBAL default_storage_engine= @old_engine; +SET GLOBAL slave_parallel_threads=@old_parallel; +SET sql_log_bin=0; +DROP TABLE table0_int_autoinc; +DROP TABLE table0_key_pk_parts_2_int_autoinc; +DROP TABLE table100_int_autoinc; +DROP TABLE table100_key_pk_parts_2_int_autoinc; +DROP TABLE table10_int_autoinc; +DROP TABLE table10_key_pk_parts_2_int_autoinc; +DROP TABLE table1_int_autoinc; +DROP TABLE table1_key_pk_parts_2_int_autoinc; +DROP TABLE table2_int_autoinc; +DROP TABLE table2_key_pk_parts_2_int_autoinc; +SET sql_log_bin=1; +include/start_slave.inc +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_mdev6386.result b/mysql-test/suite/rpl/r/rpl_mdev6386.result new file mode 100644 index 00000000000..352b9d07fef --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_mdev6386.result @@ -0,0 +1,56 @@ +include/master-slave.inc +[connection master] +ALTER TABLE 
mysql.gtid_slave_pos ENGINE = InnoDB; +FLUSH LOGS; +CREATE TABLE t1 (a INT PRIMARY KEY, b INT) Engine=InnoDB; +include/stop_slave.inc +SET sql_log_bin= 0; +INSERT INTO t1 VALUES (1, 2); +SET sql_log_bin= 1; +CHANGE MASTER TO master_use_gtid= current_pos; +Contents on slave before: +SELECT * FROM t1 ORDER BY a; +a b +1 2 +SET @old_parallel= @@GLOBAL.slave_parallel_threads; +SET GLOBAL slave_parallel_threads=8; +CREATE TEMPORARY TABLE t2 LIKE t1; +INSERT INTO t2 VALUE (1, 1); +INSERT INTO t2 VALUE (2, 1); +INSERT INTO t2 VALUE (3, 1); +INSERT INTO t2 VALUE (4, 1); +INSERT INTO t2 VALUE (5, 1); +INSERT INTO t1 SELECT * FROM t2; +DROP TEMPORARY TABLE t2; +Contents on master: +SELECT * FROM t1 ORDER BY a; +a b +1 1 +2 1 +3 1 +4 1 +5 1 +START SLAVE; +include/wait_for_slave_sql_error.inc [errno=1062] +STOP SLAVE IO_THREAD; +Contents on slave on slave error: +SELECT * FROM t1 ORDER BY a; +a b +1 2 +SET sql_log_bin= 0; +DELETE FROM t1 WHERE a=1; +SET sql_log_bin= 1; +include/start_slave.inc +Contents on slave after: +SELECT * FROM t1 ORDER BY a; +a b +1 1 +2 1 +3 1 +4 1 +5 1 +DROP TABLE t1; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads= @old_parallel; +include/start_slave.inc +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel.result b/mysql-test/suite/rpl/r/rpl_parallel.result index 20b75cbbdab..1c686e44a25 100644 --- a/mysql-test/suite/rpl/r/rpl_parallel.result +++ b/mysql-test/suite/rpl/r/rpl_parallel.result @@ -314,7 +314,7 @@ SET debug_sync='now WAIT_FOR t1_ready'; KILL THD_ID; SET debug_sync='now WAIT_FOR t2_killed'; SET debug_sync='now SIGNAL t1_cont'; -include/wait_for_slave_sql_error.inc [errno=1317,1964] +include/wait_for_slave_sql_error.inc [errno=1317,1927,1964] STOP SLAVE IO_THREAD; SELECT * FROM t3 WHERE a >= 30 ORDER BY a; a b @@ -398,7 +398,7 @@ SET debug_sync='now WAIT_FOR t1_ready'; KILL THD_ID; SET debug_sync='now WAIT_FOR t2_killed'; SET debug_sync='now SIGNAL t1_cont'; -include/wait_for_slave_sql_error.inc 
[errno=1317,1964] +include/wait_for_slave_sql_error.inc [errno=1317,1927,1964] SET debug_sync='RESET'; SET GLOBAL slave_parallel_threads=0; SET GLOBAL slave_parallel_threads=10; @@ -481,7 +481,7 @@ SET debug_sync='now WAIT_FOR t1_ready'; KILL THD_ID; SET debug_sync='now WAIT_FOR t2_killed'; SET debug_sync='now SIGNAL t1_cont'; -include/wait_for_slave_sql_error.inc [errno=1317,1964] +include/wait_for_slave_sql_error.inc [errno=1317,1927,1964] SELECT * FROM t3 WHERE a >= 50 ORDER BY a; a b 51 51 @@ -819,11 +819,37 @@ test_check OK test_check OK +*** MDEV_6435: Incorrect error handling when query binlogged partially on master with "killed" error *** +CREATE TABLE t6 (a INT) ENGINE=MyISAM; +CREATE TRIGGER tr AFTER INSERT ON t6 FOR EACH ROW SET @a = 1; +SET @old_format= @@binlog_format; +SET binlog_format= statement; +SET debug_sync='sp_head_execute_before_loop SIGNAL ready WAIT_FOR cont'; +INSERT INTO t6 VALUES (1), (2), (3); +SET debug_sync='now WAIT_FOR ready'; +KILL QUERY CONID; +SET debug_sync='now SIGNAL cont'; +ERROR 70100: Query execution was interrupted +SET binlog_format= @old_format; +SET debug_sync='RESET'; +SET debug_sync='RESET'; +include/wait_for_slave_sql_error.inc [errno=1317] +STOP SLAVE IO_THREAD; +SET GLOBAL gtid_slave_pos= 'AFTER_ERROR_GTID_POS'; +include/start_slave.inc +INSERT INTO t6 VALUES (4); +SELECT * FROM t6 ORDER BY a; +a +1 +4 +SELECT * FROM t6 ORDER BY a; +a +4 include/stop_slave.inc SET GLOBAL slave_parallel_threads=@old_parallel_threads; include/start_slave.inc SET DEBUG_SYNC= 'RESET'; DROP function foo; -DROP TABLE t1,t2,t3,t4,t5; +DROP TABLE t1,t2,t3,t4,t5,t6; SET DEBUG_SYNC= 'RESET'; include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel_retry.result b/mysql-test/suite/rpl/r/rpl_parallel_retry.result new file mode 100644 index 00000000000..cd12d92430b --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_parallel_retry.result @@ -0,0 +1,196 @@ +include/rpl_init.inc [topology=1->2] +*** Test retry of transactions that fail to 
replicate due to deadlock or similar temporary error. *** +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t1 (a int PRIMARY KEY, b INT) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1,1); +SET sql_log_bin=0; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +RETURN x; +END +|| +SET sql_log_bin=1; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=5; +include/start_slave.inc +SET sql_log_bin=0; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +IF d1 != '' THEN +SET debug_sync = d1; +END IF; +IF d2 != '' THEN +SET debug_sync = d2; +END IF; +RETURN x; +END +|| +SET sql_log_bin=1; +include/stop_slave.inc +SET gtid_seq_no = 100; +BEGIN; +INSERT INTO t1 VALUES (2,1); +UPDATE t1 SET b=b+1 WHERE a=1; +INSERT INTO t1 VALUES (3,1); +COMMIT; +SELECT * FROM t1 ORDER BY a; +a b +1 2 +2 1 +3 1 +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_gtid_0_x_100"; +include/start_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +retries +1 +SELECT * FROM t1 ORDER BY a; +a b +1 2 +2 1 +3 1 +*** Test that double retry works when the first retry also fails with temp error *** +include/stop_slave.inc +SET gtid_seq_no = 100; +SET @old_server_id = @@server_id; +SET server_id = 10; +BEGIN; +INSERT INTO t1 VALUES (4,1); +UPDATE t1 SET b=b+1 WHERE a=1; +INSERT INTO t1 VALUES (5,1); +INSERT INTO t1 VALUES (6,1); +COMMIT; +SET server_id = @old_server_id; +SELECT * FROM t1 ORDER BY a; +a b +1 3 +2 1 +3 1 +4 1 +5 1 +6 1 +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_gtid_0_x_100,rpl_parallel_simulate_double_temp_err_gtid_0_x_100"; +include/start_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +retries +2 +SELECT * FROM t1 ORDER BY a; +a b +1 3 +2 1 +3 1 +4 1 +5 1 +6 1 +*** Test too many retries, eventually causing failure. 
*** +include/stop_slave.inc +SET gtid_seq_no = 100; +SET @old_server_id = @@server_id; +SET server_id = 11; +BEGIN; +INSERT INTO t1 VALUES (7,1); +UPDATE t1 SET b=b+1 WHERE a=1; +INSERT INTO t1 VALUES (8,1); +INSERT INTO t1 VALUES (9,1); +COMMIT; +SET server_id = @old_server_id; +SELECT * FROM t1 ORDER BY a; +a b +1 4 +2 1 +3 1 +4 1 +5 1 +6 1 +7 1 +8 1 +9 1 +SET sql_log_bin=0; +CALL mtr.add_suppression("Slave worker thread retried transaction 10 time\\(s\\) in vain, giving up"); +CALL mtr.add_suppression("Slave: Deadlock found when trying to get lock; try restarting transaction"); +SET sql_log_bin=1; +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_gtid_0_x_100,rpl_parallel_simulate_infinite_temp_err_gtid_0_x_100"; +START SLAVE; +include/wait_for_slave_sql_error.inc [errno=1213] +SET GLOBAL debug_dbug=@old_dbug; +retries +10 +SELECT * FROM t1 ORDER BY a; +a b +1 3 +2 1 +3 1 +4 1 +5 1 +6 1 +STOP SLAVE IO_THREAD; +include/start_slave.inc +SELECT * FROM t1 ORDER BY a; +a b +1 4 +2 1 +3 1 +4 1 +5 1 +6 1 +7 1 +8 1 +9 1 +*** Test retry of event group that spans multiple relay log files. 
*** +CREATE TABLE t2 (a int PRIMARY KEY, b BLOB) ENGINE=InnoDB; +INSERT INTO t2 VALUES (1,"Hulubullu"); +include/stop_slave.inc +SET @old_max= @@GLOBAL.max_relay_log_size; +SET GLOBAL max_relay_log_size=4096; +SET gtid_seq_no = 100; +SET @old_server_id = @@server_id; +SET server_id = 12; +BEGIN; +INSERT INTO t1 VALUES (10, 4); +COMMIT; +SET server_id = @old_server_id; +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +a b +10 4 +SELECT a, LENGTH(b) FROM t2 ORDER BY a; +a LENGTH(b) +1 9 +2 5006 +3 5012 +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_gtid_0_x_100"; +include/start_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +retries +1 +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +a b +10 4 +SELECT a, LENGTH(b) FROM t2 ORDER BY a; +a LENGTH(b) +1 9 +2 5006 +3 5012 +INSERT INTO t1 VALUES (11,11); +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +a b +10 4 +11 11 +SELECT a, LENGTH(b) FROM t2 ORDER BY a; +a LENGTH(b) +1 9 +2 5006 +3 5012 +4 5000 +SET GLOBAL max_relay_log_size=@old_max; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +DROP TABLE t1, t2; +DROP function foo; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_read_new_relay_log_info.result b/mysql-test/suite/rpl/r/rpl_read_new_relay_log_info.result new file mode 100644 index 00000000000..e659c3ee283 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_read_new_relay_log_info.result @@ -0,0 +1,14 @@ +include/master-slave.inc +[connection master] +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1); +DROP TABLE t1; +==== Check that we can understand the new format of relay-log.info ==== +include/stop_slave.inc +RESET SLAVE; +# Read relay-log.info +START SLAVE IO_THREAD; +include/wait_for_slave_io_to_start.inc +# Check that relay log coordinates are equal to those saved in new-format_relay-log.info += , 0, slave-relay-bin.000001, 4 +include/rpl_end.inc diff --git 
a/mysql-test/suite/rpl/r/rpl_read_old_relay_log_info.result b/mysql-test/suite/rpl/r/rpl_read_old_relay_log_info.result new file mode 100644 index 00000000000..7a9d3b795d8 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_read_old_relay_log_info.result @@ -0,0 +1,14 @@ +include/master-slave.inc +[connection master] +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1); +DROP TABLE t1; +==== Check that we still understand the old format of relay-log.info ==== +include/stop_slave.inc +RESET SLAVE; +# Read relay-log.info +START SLAVE IO_THREAD; +include/wait_for_slave_io_to_start.inc +# Check that relay log coordinates are equal to those we saved in old-format_relay-log.info += , 0, slave-relay-bin.000001, 4 +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_semi_sync_uninstall_plugin.result b/mysql-test/suite/rpl/r/rpl_semi_sync_uninstall_plugin.result new file mode 100644 index 00000000000..0809af5f943 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_semi_sync_uninstall_plugin.result @@ -0,0 +1,63 @@ +include/master-slave.inc +[connection master] +call mtr.add_suppression("Read semi-sync reply network error"); +call mtr.add_suppression("Timeout waiting for reply of binlog"); +INSTALL PLUGIN rpl_semi_sync_master SONAME 'semisync_master'; +[connection slave] +INSTALL PLUGIN rpl_semi_sync_slave SONAME 'semisync_slave'; +UNINSTALL PLUGIN rpl_semi_sync_slave; +[connection master] +UNINSTALL PLUGIN rpl_semi_sync_master; +CREATE TABLE t1(i int); +INSERT INTO t1 values (1); +DROP TABLE t1; +[connection slave] +include/install_semisync.inc +[connection slave] +UNINSTALL PLUGIN rpl_semi_sync_slave; +Warnings: +Warning 1620 Plugin is busy and will be uninstalled on shutdown +select plugin_name,plugin_status from information_schema.plugins where plugin_name like 'rpl_%'; +plugin_name plugin_status +rpl_semi_sync_slave DELETED +[connection master] +UNINSTALL PLUGIN rpl_semi_sync_master; +Warnings: +Warning 1620 Plugin is busy and will be uninstalled on shutdown +select 
plugin_name,plugin_status from information_schema.plugins where plugin_name like 'rpl_%'; +plugin_name plugin_status +rpl_semi_sync_master DELETED +CREATE TABLE t1(i int); +INSERT INTO t1 values (2); +DROP TABLE t1; +[connection slave] +show status like "Rpl_semi_sync_slave_status"; +Variable_name Value +Rpl_semi_sync_slave_status ON +[connection master] +show status like "Rpl_semi_sync_master_status"; +Variable_name Value +Rpl_semi_sync_master_status ON +show status like "Rpl_semi_sync_master_clients"; +Variable_name Value +Rpl_semi_sync_master_clients 1 +select plugin_name,plugin_status from information_schema.plugins where plugin_name like 'rpl_%'; +plugin_name plugin_status +rpl_semi_sync_master DELETED +[connection slave] +include/stop_slave.inc +select plugin_name,plugin_status from information_schema.plugins where plugin_name like 'rpl_%'; +plugin_name plugin_status +[connection master] +create table t2 (a int); +drop table t2; +[connection slave] +include/start_slave.inc +select plugin_name,plugin_status from information_schema.plugins where plugin_name like 'rpl_%'; +plugin_name plugin_status +[connection master] +CREATE TABLE t1(i int); +INSERT INTO t1 values (3); +DROP TABLE t1; +[connection slave] +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_skip_incident.result b/mysql-test/suite/rpl/r/rpl_skip_incident.result new file mode 100644 index 00000000000..1dc0508af32 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_skip_incident.result @@ -0,0 +1,25 @@ +include/master-slave.inc +[connection master] +**** On Master **** +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1),(2),(3); +SELECT * FROM t1; +a +1 +2 +3 +REPLACE INTO t1 VALUES (4); +SELECT * FROM t1; +a +1 +2 +3 +4 +include/check_slave_is_running.inc +Should have two binary logs here +show binary logs; +Log_name File_size +master-bin.000001 # +master-bin.000002 # +DROP TABLE t1; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_stop_slave.result 
b/mysql-test/suite/rpl/r/rpl_stop_slave.result index 5959ee09993..b93ecce3597 100644 --- a/mysql-test/suite/rpl/r/rpl_stop_slave.result +++ b/mysql-test/suite/rpl/r/rpl_stop_slave.result @@ -94,10 +94,12 @@ DROP TABLE t1, t2; CREATE TABLE t1 (c1 INT KEY, c2 INT) ENGINE=InnoDB; CREATE TABLE t2 (c1 INT) ENGINE=MyISAM; INSERT INTO t1 VALUES(1, 1); +include/stop_slave.inc [connection master] +include/stop_dump_threads.inc SET GLOBAL debug_dbug= '+d,dump_thread_wait_before_send_xid,*'; [connection slave] -include/restart_slave.inc +include/start_slave.inc BEGIN; UPDATE t1 SET c2 = 2 WHERE c1 = 1; [connection master] @@ -116,6 +118,9 @@ SET DEBUG_SYNC= 'now WAIT_FOR signal.continued'; [connection slave] include/wait_for_slave_to_stop.inc [connection slave1] +[connection master] +include/stop_dump_threads.inc +[connection slave1] include/start_slave.inc [connection master] DROP TABLE t1, t2; diff --git a/mysql-test/suite/rpl/r/rpl_table_options.result b/mysql-test/suite/rpl/r/rpl_table_options.result index d69570a1709..a94d6e9bc2f 100644 --- a/mysql-test/suite/rpl/r/rpl_table_options.result +++ b/mysql-test/suite/rpl/r/rpl_table_options.result @@ -12,6 +12,12 @@ show create table t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) NOT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 /* `ull`=12340 */ +set sql_mode=ignore_bad_table_options; +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 `ull`=12340 drop table t1; set storage_engine=default; diff --git a/mysql-test/suite/rpl/t/failed_create_view-6409.test b/mysql-test/suite/rpl/t/failed_create_view-6409.test new file mode 100644 index 00000000000..5d96e6f8a93 --- /dev/null +++ b/mysql-test/suite/rpl/t/failed_create_view-6409.test @@ -0,0 +1,24 @@ +# +# MDEV-6409 CREATE VIEW replication problem if error occurs in mysql_register_view +# + +# +# +# verify that failed CREATE VIEW is not replicated + +create table v1 (a int); + +source 
include/master-slave.inc; + +connection master; +create table t1 (a int); +--error ER_TABLE_EXISTS_ERROR +create view v1 as select * from t1; +show tables; +sync_slave_with_master; +show tables; + +connection master; +drop table if exists t1, v1; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/kill_hard-6290.test b/mysql-test/suite/rpl/t/kill_hard-6290.test new file mode 100644 index 00000000000..7624235666a --- /dev/null +++ b/mysql-test/suite/rpl/t/kill_hard-6290.test @@ -0,0 +1,11 @@ +# +# MDEV-6290 Crash in KILL HARD QUERY USER x@y when slave threads are running +# + +# this test doesn't depend on the binlog format, no need to run it three times +--source include/have_binlog_format_mixed.inc + +--source include/master-slave.inc +--connection server_2 +kill user test2@nohost; +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_checksum.test b/mysql-test/suite/rpl/t/rpl_checksum.test index 0f0b84aa632..bd0ab7ecc9c 100644 --- a/mysql-test/suite/rpl/t/rpl_checksum.test +++ b/mysql-test/suite/rpl/t/rpl_checksum.test @@ -260,9 +260,67 @@ AAAAAAAAAAAAAAAAAAAx+apMEzgNAAgAEgAEBAQEEgAAVAAEGggAAAAICAgCAA== #connection slave; sync_slave_with_master; + + +--echo *** Bug#59123 / MDEV-5799: INCIDENT_EVENT checksum written to error log as garbage characters *** + +--connection master + +--source include/wait_for_binlog_checkpoint.inc +CREATE TABLE t4 (a INT PRIMARY KEY); +INSERT INTO t4 VALUES (1); + +SET sql_log_bin=0; +CALL mtr.add_suppression("\\[ERROR\\] Can't generate a unique log-filename"); +SET sql_log_bin=1; +SET @old_dbug= @@GLOBAL.debug_dbug; +SET debug_dbug= '+d,binlog_inject_new_name_error'; +--error ER_NO_UNIQUE_LOGFILE +FLUSH LOGS; +SET debug_dbug= @old_dbug; + +INSERT INTO t4 VALUES (2); + +--connection slave +--let $slave_sql_errno= 1590 +--source include/wait_for_slave_sql_error.inc + +# Search the error log for the error message. 
+# The bug was that 4 garbage bytes were output in the middle of the error +# message; by searching for a pattern that spans that location, we can +# catch the error. +let $log_error_= `SELECT @@GLOBAL.log_error`; +if(!$log_error_) +{ + # MySQL Server on windows is started with --console and thus + # does not know the location of its .err log, use default location + let $log_error_ = $MYSQLTEST_VARDIR/log/mysqld.2.err; +} +--let SEARCH_FILE= $log_error_ +--let SEARCH_RANGE=-50000 +--let SEARCH_PATTERN= Slave SQL: The incident LOST_EVENTS occured on the master\. Message: error writing to the binary log, Internal MariaDB error code: 1590 +--source include/search_pattern_in_file.inc + +SELECT * FROM t4 ORDER BY a; +STOP SLAVE IO_THREAD; +SET sql_slave_skip_counter= 1; +--source include/start_slave.inc + +--connection master +--save_master_pos + +--connection slave +--sync_with_master +SELECT * FROM t4 ORDER BY a; + + +--connection slave set @@global.binlog_checksum = @slave_save_binlog_checksum; set @@global.slave_sql_verify_checksum = @save_slave_sql_verify_checksum; --echo End of tests +--connection master +DROP TABLE t4; + --source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_gtid_basic.test b/mysql-test/suite/rpl/t/rpl_gtid_basic.test index 3f2d5e1e321..5ecff519aef 100644 --- a/mysql-test/suite/rpl/t/rpl_gtid_basic.test +++ b/mysql-test/suite/rpl/t/rpl_gtid_basic.test @@ -69,15 +69,7 @@ save_master_pos; connection server_4; --replace_result $MASTER_MYPORT MASTER_PORT eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $MASTER_MYPORT, - MASTER_USE_GTID=SLAVE_POS; -# Test that sql_slave_skip_counter is prevented in GTID mode. 
---error ER_SLAVE_SKIP_NOT_IN_GTID -SET GLOBAL sql_slave_skip_counter=1; ---replace_result $MASTER_MYPORT MASTER_PORT -eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $MASTER_MYPORT, MASTER_USE_GTID=CURRENT_POS; ---error ER_SLAVE_SKIP_NOT_IN_GTID -SET GLOBAL sql_slave_skip_counter=10; --source include/start_slave.inc sync_with_master; SELECT * FROM t1 ORDER BY a; @@ -184,8 +176,8 @@ SET GLOBAL gtid_binlog_state = '0-1-10,1-2-20,0-3-30'; --let $binlog_file= master-bin.000001 --let $binlog_start= 4 --source include/show_binlog_events.inc -SELECT @@GLOBAL.gtid_binlog_pos; -SELECT @@GLOBAL.gtid_binlog_state; +#SELECT @@GLOBAL.gtid_binlog_pos; +#SELECT @@GLOBAL.gtid_binlog_state; --error ER_BINLOG_MUST_BE_EMPTY SET GLOBAL gtid_binlog_state = @old_state; RESET MASTER; @@ -262,6 +254,7 @@ INSERT INTO t1 VALUES (3); --let $pos= `SELECT @@gtid_binlog_pos` --connection s1 +--replace_result $pos POS eval SET @pos= '$pos'; SELECT master_gtid_wait(@pos, 0); SELECT * FROM t1 WHERE a >= 3; @@ -374,6 +367,120 @@ reap; reap; +--echo *** Test sql_gtid_slave_pos when used with GTID *** + +--connection server_2 +--source include/stop_slave.inc + +--connection server_1 +SET gtid_domain_id=2; +SET gtid_seq_no=1000; +INSERT INTO t1 VALUES (10); +INSERT INTO t1 VALUES (11); +--save_master_pos + +--connection server_2 +SET sql_slave_skip_counter= 1; +--source include/start_slave.inc +--sync_with_master +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +SELECT IF(LOCATE("2-1-1001", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! 
expected GTID 2-1-1001 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; + +--source include/stop_slave.inc + +--connection server_1 +SET gtid_domain_id=2; +SET gtid_seq_no=1010; +INSERT INTO t1 VALUES (12); +INSERT INTO t1 VALUES (13); +--save_master_pos + +--connection server_2 +SET sql_slave_skip_counter= 2; +--source include/start_slave.inc +--sync_with_master +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +SELECT IF(LOCATE("2-1-1011", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1011 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; + +--source include/stop_slave.inc + +--connection server_1 +SET gtid_domain_id=2; +SET gtid_seq_no=1020; +INSERT INTO t1 VALUES (14); +INSERT INTO t1 VALUES (15); +INSERT INTO t1 VALUES (16); +--save_master_pos + +--connection server_2 +SET sql_slave_skip_counter= 3; +--source include/start_slave.inc +--sync_with_master +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +SELECT IF(LOCATE("2-1-1022", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1022 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; + +--source include/stop_slave.inc + +--connection server_1 +SET gtid_domain_id=2; +SET gtid_seq_no=1030; +INSERT INTO t1 VALUES (17); +INSERT INTO t1 VALUES (18); +INSERT INTO t1 VALUES (19); +--save_master_pos + +--connection server_2 +SET sql_slave_skip_counter= 5; +--source include/start_slave.inc +--sync_with_master +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +SELECT IF(LOCATE("2-1-1032", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! 
expected GTID 2-1-1032 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; + + +--source include/stop_slave.inc + +--connection server_1 +SET gtid_domain_id=3; +SET gtid_seq_no=100; +CREATE TABLE t2 (a INT PRIMARY KEY); +DROP TABLE t2; +SET gtid_domain_id=2; +SET gtid_seq_no=1040; +INSERT INTO t1 VALUES (20); +--save_master_pos + +--connection server_2 +SET @saved_mode= @@GLOBAL.slave_ddl_exec_mode; +SET GLOBAL slave_ddl_exec_mode=STRICT; +SET sql_slave_skip_counter=1; +START SLAVE UNTIL master_gtid_pos="3-1-100"; +--let $master_pos=3-1-100 +--source include/sync_with_master_gtid.inc +--source include/wait_for_slave_sql_to_stop.inc +--error ER_NO_SUCH_TABLE +SELECT * FROM t2; +SELECT IF(LOCATE("3-1-100", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-100 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; + +# Start the slave again, it should fail on the DROP TABLE as the table is not there. +SET sql_log_bin=0; +CALL mtr.add_suppression("Slave: Unknown table 'test\\.t2' Error_code: 1051"); +SET sql_log_bin=1; +START SLAVE; +--let $slave_sql_errno=1051 +--source include/wait_for_slave_sql_error.inc +SELECT IF(LOCATE("3-1-100", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-100 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; + +STOP SLAVE IO_THREAD; +SET sql_slave_skip_counter=2; +--source include/start_slave.inc +--sync_with_master + +SELECT * FROM t1 WHERE a >= 20 ORDER BY a; +SELECT IF(LOCATE("3-1-101", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-101 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; +SELECT IF(LOCATE("2-1-1040", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! 
expected GTID 2-1-1040 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; + +SET GLOBAL slave_ddl_exec_mode= @saved_mode; + --connection server_1 DROP TABLE t1; diff --git a/mysql-test/suite/rpl/t/rpl_gtid_crash.test b/mysql-test/suite/rpl/t/rpl_gtid_crash.test index e02816e1d30..82667b31e57 100644 --- a/mysql-test/suite/rpl/t/rpl_gtid_crash.test +++ b/mysql-test/suite/rpl/t/rpl_gtid_crash.test @@ -100,7 +100,7 @@ SET gtid_domain_id= 2; INSERT INTO t1 VALUES (3); FLUSH LOGS; --source include/show_binary_logs.inc ---replace_column 2 # 4 # 5 # +--replace_column 2 # 4 # 5 # 6 # SHOW BINLOG EVENTS IN 'master-bin.000003' LIMIT 1,1; --write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect @@ -120,7 +120,7 @@ EOF --source include/wait_until_connected_again.inc --source include/show_binary_logs.inc ---replace_column 2 # 4 # 5 # +--replace_column 2 # 4 # 5 # 6 # SHOW BINLOG EVENTS IN 'master-bin.000004' LIMIT 1,1; --save_master_pos diff --git a/mysql-test/suite/rpl/t/rpl_gtid_errorlog.test b/mysql-test/suite/rpl/t/rpl_gtid_errorlog.test new file mode 100644 index 00000000000..24298e9893a --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_gtid_errorlog.test @@ -0,0 +1,76 @@ +--source include/have_debug.inc +--source include/master-slave.inc + +--echo *** Test MDEV-6120, output of current GTID when a replication error is logged to the errorlog *** +--connection master +CREATE TABLE t1(a INT PRIMARY KEY); +--sync_slave_with_master + +--connection slave +--source include/stop_slave.inc +CHANGE MASTER TO master_use_gtid=slave_pos; + +--connection master +INSERT INTO t1 VALUES (1); +SET gtid_seq_no=100; +INSERT INTO t1 VALUES (2); +INSERT INTO t1 VALUES (3); +INSERT INTO t1 VALUES (4); +--save_master_pos + +--connection slave +SET sql_log_bin=0; +INSERT INTO t1 VALUES (2); +SET sql_log_bin=1; + +START SLAVE; +--let $slave_sql_errno=1062 +--source include/wait_for_slave_sql_error.inc + +--source include/stop_slave.inc +# Skip the problem event from the master. 
+SET GLOBAL gtid_slave_pos= "0-1-100"; +--source include/start_slave.inc +--sync_with_master + +SELECT * FROM t1 ORDER BY a; + +--connection master + +SET @dbug_save= @@debug_dbug; +SET debug_dbug= '+d,incident_database_resync_on_replace'; +REPLACE INTO t1 VALUES (5); +SET debug_dbug= @dbug_save; +--save_master_pos + +--connection slave +--let $slave_sql_errno=1590 +--source include/wait_for_slave_sql_error.inc +--source include/stop_slave.inc +SET sql_slave_skip_counter=1; +--source include/start_slave.inc +--sync_with_master + +SELECT * FROM t1 ORDER BY a; + + +# Check error log for correct messages. +let $log_error_= `SELECT @@GLOBAL.log_error`; +if(!$log_error_) +{ + # MySQL Server on windows is started with --console and thus + # does not know the location of its .err log, use default location + let $log_error_ = $MYSQLTEST_VARDIR/log/mysqld.2.err; +} +--let SEARCH_FILE=$log_error_ +--let SEARCH_RANGE=-50000 +--let SEARCH_PATTERN=Slave SQL: Error 'Duplicate entry .* on query\. .*Query: '.*', Gtid 0-1-100, Internal MariaDB error code:|Slave SQL: Could not execute Write_rows.*table test.t1; Duplicate entry.*, Gtid 0-1-100, Internal MariaDB error +--source include/search_pattern_in_file.inc +--let SEARCH_PATTERN=Slave SQL: The incident LOST_EVENTS occured on the master\. 
Message: <none>, Internal MariaDB error code: 1590 +--source include/search_pattern_in_file.inc + + +--connection master +DROP TABLE t1; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_gtid_master_promote.test b/mysql-test/suite/rpl/t/rpl_gtid_master_promote.test index f3cc4aca135..bd5343d7558 100644 --- a/mysql-test/suite/rpl/t/rpl_gtid_master_promote.test +++ b/mysql-test/suite/rpl/t/rpl_gtid_master_promote.test @@ -139,6 +139,7 @@ SELECT * FROM t4 ORDER BY a,b; --connection server_2 --replace_result $SERVER_MYPORT_3 SERVER_MYPORT_3 eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SERVER_MYPORT_3; +--replace_result $server3_pos SERVER3_POS eval START SLAVE UNTIL master_gtid_pos = "$server3_pos"; --source include/wait_for_slave_to_stop.inc SELECT * FROM t1 ORDER BY a; @@ -151,6 +152,7 @@ SELECT * FROM t4 ORDER BY a,b; --connection server_2 --replace_result $SERVER_MYPORT_4 SERVER_MYPORT_4 eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SERVER_MYPORT_4; +--replace_result $server4_pos SERVER4_POS eval START SLAVE UNTIL master_gtid_pos = "$server4_pos"; --source include/wait_for_slave_to_stop.inc SELECT * FROM t1 ORDER BY a; @@ -163,6 +165,7 @@ SELECT * FROM t4 ORDER BY a,b; --connection server_2 --replace_result $SERVER_MYPORT_5 SERVER_MYPORT_5 eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SERVER_MYPORT_5; +--replace_result $server5_pos SERVER5_POS eval START SLAVE UNTIL master_gtid_pos = "$server5_pos"; --source include/wait_for_slave_to_stop.inc SELECT * FROM t1 ORDER BY a; diff --git a/mysql-test/suite/rpl/t/rpl_gtid_stop_start.test b/mysql-test/suite/rpl/t/rpl_gtid_stop_start.test index 1f0532f9922..b57714aaa57 100644 --- a/mysql-test/suite/rpl/t/rpl_gtid_stop_start.test +++ b/mysql-test/suite/rpl/t/rpl_gtid_stop_start.test @@ -63,7 +63,7 @@ INSERT INTO t1 VALUES (4); --replace_column 2 # 4 # 5 # SHOW BINLOG EVENTS IN 'master-bin.000003' LIMIT 1,1; FLUSH LOGS; ---replace_column 2 # 4 # 5 # 
+--replace_column 2 # 4 # 5 # 6 # SHOW BINLOG EVENTS IN 'master-bin.000004' LIMIT 1,1; --write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect @@ -83,7 +83,7 @@ EOF --enable_reconnect --source include/wait_until_connected_again.inc ---replace_column 2 # 4 # 5 # +--replace_column 2 # 4 # 5 # 6 # SHOW BINLOG EVENTS IN 'master-bin.000005' LIMIT 1,1; --source include/show_binary_logs.inc diff --git a/mysql-test/suite/rpl/t/rpl_gtid_until.test b/mysql-test/suite/rpl/t/rpl_gtid_until.test index 68ebd464fd1..4dde7da38a5 100644 --- a/mysql-test/suite/rpl/t/rpl_gtid_until.test +++ b/mysql-test/suite/rpl/t/rpl_gtid_until.test @@ -175,6 +175,7 @@ DROP TABLE t3; --save_master_pos --connection server_2 +--replace_result $until_condition UNTIL_CONDITION eval START SLAVE UNTIL master_gtid_pos='$until_condition'; --source include/wait_for_slave_to_stop.inc SHOW CREATE TABLE t3; diff --git a/mysql-test/suite/rpl/t/rpl_heartbeat_debug.test b/mysql-test/suite/rpl/t/rpl_heartbeat_debug.test new file mode 100644 index 00000000000..7cdf67d6532 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_heartbeat_debug.test @@ -0,0 +1,52 @@ +# Testing master to slave heartbeat protocol, test cases that need debug build. 
+ +--source include/master-slave.inc +--source include/have_debug.inc + +connection slave; +--source include/stop_slave.inc +set @restore_slave_net_timeout= @@global.slave_net_timeout; +--disable_warnings +set @@global.slave_net_timeout= 10; +--enable_warnings + +### +### Checking the range +### + +# +# default period slave_net_timeout/2 +# +--query_vertical show status like 'Slave_heartbeat_period'; +SET @save_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,simulate_slave_heartbeat_network_error"; +CALL mtr.add_suppression('SET @master_heartbeat_period to master failed with error'); +CALL mtr.add_suppression('Master command COM_REGISTER_SLAVE failed: failed registering on master, reconnecting to try again'); +--source include/start_slave.inc + + +connection master; +--disable_warnings +drop table if exists t1; +--enable_warnings + +CREATE TABLE t1 (a INT PRIMARY KEY); +INSERT INTO t1 VALUES (1); + +sync_slave_with_master; + +--connection slave +SELECT * FROM t1; + +connection master; +drop table t1; + +connection slave; +--source include/stop_slave.inc +--disable_warnings +SET GLOBAL debug_dbug=@save_dbug; +set @@global.slave_net_timeout= @restore_slave_net_timeout; +--enable_warnings +--source include/start_slave.inc + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_mdev6020.test b/mysql-test/suite/rpl/t/rpl_mdev6020.test new file mode 100644 index 00000000000..2fd342f5eda --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_mdev6020.test @@ -0,0 +1,70 @@ +--source include/have_innodb.inc +--source include/have_partition.inc +--source include/have_binlog_format_mixed_or_row.inc +--source include/master-slave.inc + +--connection slave +--source include/stop_slave.inc + +--connection master +--let $datadir= `SELECT @@datadir` + +--let $rpl_server_number= 1 +--source include/rpl_stop_server.inc + +--remove_file $datadir/master-bin.000001 +--remove_file $datadir/master-bin.state +--copy_file $MYSQL_TEST_DIR/std_data/mdev6020-mysql-bin.000001 
$datadir/master-bin.000001 + +--let $rpl_server_number= 1 +--source include/rpl_start_server.inc + +--source include/wait_until_connected_again.inc + +--connection slave +SET SQL_LOG_BIN=0; +ALTER TABLE mysql.gtid_slave_pos ENGINE = InnoDB; +SET SQL_LOG_BIN=1; +SET @old_engine= @@GLOBAL.default_storage_engine; +SET GLOBAL default_storage_engine=InnoDB; +SET @old_parallel= @@GLOBAL.slave_parallel_threads; +SET GLOBAL slave_parallel_threads=12; +--replace_result $SERVER_MYPORT_1 SERVER_MYPORT_1 +eval CHANGE MASTER TO master_host='127.0.0.1', master_port=$SERVER_MYPORT_1, master_user='root', master_log_file='master-bin.000001', master_log_pos=4; +--source include/start_slave.inc + +--connection master +SET SQL_LOG_BIN=0; +ALTER TABLE mysql.gtid_slave_pos ENGINE = InnoDB; +SET SQL_LOG_BIN=1; +--save_master_pos + +--connection slave +--sync_with_master + +SELECT @@gtid_slave_pos; +CHECKSUM TABLE table0_int_autoinc, table0_key_pk_parts_2_int_autoinc, table100_int_autoinc, table100_key_pk_parts_2_int_autoinc, table10_int_autoinc, table10_key_pk_parts_2_int_autoinc, table1_int_autoinc, table1_key_pk_parts_2_int_autoinc, table2_int_autoinc, table2_key_pk_parts_2_int_autoinc; + +--source include/stop_slave.inc + + +SET GLOBAL default_storage_engine= @old_engine; +SET GLOBAL slave_parallel_threads=@old_parallel; +SET sql_log_bin=0; +DROP TABLE table0_int_autoinc; +DROP TABLE table0_key_pk_parts_2_int_autoinc; +DROP TABLE table100_int_autoinc; +DROP TABLE table100_key_pk_parts_2_int_autoinc; +DROP TABLE table10_int_autoinc; +DROP TABLE table10_key_pk_parts_2_int_autoinc; +DROP TABLE table1_int_autoinc; +DROP TABLE table1_key_pk_parts_2_int_autoinc; +DROP TABLE table2_int_autoinc; +DROP TABLE table2_key_pk_parts_2_int_autoinc; +SET sql_log_bin=1; + +--source include/start_slave.inc + +--connection master + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_mdev6386-slave.opt b/mysql-test/suite/rpl/t/rpl_mdev6386-slave.opt new file mode 100644 index 
00000000000..a5d959ae7fe --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_mdev6386-slave.opt @@ -0,0 +1 @@ +--disable-log-slave-updates diff --git a/mysql-test/suite/rpl/t/rpl_mdev6386.test b/mysql-test/suite/rpl/t/rpl_mdev6386.test new file mode 100644 index 00000000000..5513d15a77d --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_mdev6386.test @@ -0,0 +1,71 @@ +--source include/have_innodb.inc +--source include/master-slave.inc + +--connection master +# ToDo: Remove this FLUSH LOGS when MDEV-6403 is fixed. +ALTER TABLE mysql.gtid_slave_pos ENGINE = InnoDB; +FLUSH LOGS; +CREATE TABLE t1 (a INT PRIMARY KEY, b INT) Engine=InnoDB; +--sync_slave_with_master + +--connection slave +--source include/stop_slave.inc +# Provoke a duplicate key error on replication. +SET sql_log_bin= 0; +INSERT INTO t1 VALUES (1, 2); +SET sql_log_bin= 1; +CHANGE MASTER TO master_use_gtid= current_pos; +--echo Contents on slave before: +SELECT * FROM t1 ORDER BY a; + +SET @old_parallel= @@GLOBAL.slave_parallel_threads; +SET GLOBAL slave_parallel_threads=8; + +--connection master + +CREATE TEMPORARY TABLE t2 LIKE t1; +INSERT INTO t2 VALUE (1, 1); +INSERT INTO t2 VALUE (2, 1); +INSERT INTO t2 VALUE (3, 1); +INSERT INTO t2 VALUE (4, 1); +INSERT INTO t2 VALUE (5, 1); +INSERT INTO t1 SELECT * FROM t2; +DROP TEMPORARY TABLE t2; +--save_master_pos +--echo Contents on master: +SELECT * FROM t1 ORDER BY a; + +--connection slave +START SLAVE; +# The slave will stop with a duplicate key error. +# The bug was 1) that the next DROP TEMPORARY TABLE would be allowed to run +# anyway, and 2) that then record_gtid() would get an error during commit +# (since the prior commit failed), and this error was not correctly handled, +# which caused an assertion about closing tables while a statement was still +# active. 
+--let $slave_sql_errno=1062 +--source include/wait_for_slave_sql_error.inc + +STOP SLAVE IO_THREAD; +--echo Contents on slave on slave error: +SELECT * FROM t1 ORDER BY a; + +# Resolve the duplicate key error so replication can be resumed. +SET sql_log_bin= 0; +DELETE FROM t1 WHERE a=1; +SET sql_log_bin= 1; + +--source include/start_slave.inc +--sync_with_master +--echo Contents on slave after: +SELECT * FROM t1 ORDER BY a; + +--connection master +DROP TABLE t1; + +--connection slave +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads= @old_parallel; +--source include/start_slave.inc + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_mysql_upgrade.test b/mysql-test/suite/rpl/t/rpl_mysql_upgrade.test index 398768e87bf..e99a233ac34 100644 --- a/mysql-test/suite/rpl/t/rpl_mysql_upgrade.test +++ b/mysql-test/suite/rpl/t/rpl_mysql_upgrade.test @@ -5,11 +5,9 @@ # options are added into mysql_upgrade. These options control whether sql # statements are binlogged or not. ############################################################################# ---source include/master-slave.inc --source include/have_innodb.inc - -# Only run test if "mysql_upgrade" is found ---source include/have_mysql_upgrade.inc +--source include/mysql_upgrade_preparation.inc +--source include/master-slave.inc call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT"); call mtr.add_suppression("table or database name 'mysqltest-1'"); diff --git a/mysql-test/suite/rpl/t/rpl_parallel.test b/mysql-test/suite/rpl/t/rpl_parallel.test index 9b68d6648e0..0f679fa18ee 100644 --- a/mysql-test/suite/rpl/t/rpl_parallel.test +++ b/mysql-test/suite/rpl/t/rpl_parallel.test @@ -438,7 +438,7 @@ SET debug_sync='now WAIT_FOR t2_killed'; # Now we can allow T1 to proceed. 
SET debug_sync='now SIGNAL t1_cont'; ---let $slave_sql_errno= 1317,1964 +--let $slave_sql_errno= 1317,1927,1964 --source include/wait_for_slave_sql_error.inc STOP SLAVE IO_THREAD; SELECT * FROM t3 WHERE a >= 30 ORDER BY a; @@ -573,7 +573,7 @@ SET debug_sync='now WAIT_FOR t2_killed'; # Now we can allow T1 to proceed. SET debug_sync='now SIGNAL t1_cont'; ---let $slave_sql_errno= 1317,1964 +--let $slave_sql_errno= 1317,1927,1964 --source include/wait_for_slave_sql_error.inc # Now we have to disable the debug_sync statements, so they do not trigger @@ -712,7 +712,7 @@ SET debug_sync='now WAIT_FOR t2_killed'; # Now we can allow T1 to proceed. SET debug_sync='now SIGNAL t1_cont'; ---let $slave_sql_errno= 1317,1964 +--let $slave_sql_errno= 1317,1927,1964 --source include/wait_for_slave_sql_error.inc SELECT * FROM t3 WHERE a >= 50 ORDER BY a; @@ -1277,6 +1277,7 @@ eval SELECT IF('$io_pos' = '$sql_pos', "OK", "Not ok, $io_pos <> $sql_pos") AS t --connection server_1 FLUSH LOGS; +--source include/wait_for_binlog_checkpoint.inc --save_master_pos --connection server_2 @@ -1291,6 +1292,54 @@ eval SELECT IF('$io_pos' = '$sql_pos', "OK", "Not ok, $io_pos <> $sql_pos") AS t --enable_query_log +--echo *** MDEV_6435: Incorrect error handling when query binlogged partially on master with "killed" error *** + +--connection server_1 +CREATE TABLE t6 (a INT) ENGINE=MyISAM; +CREATE TRIGGER tr AFTER INSERT ON t6 FOR EACH ROW SET @a = 1; + +--connection con1 +SET @old_format= @@binlog_format; +SET binlog_format= statement; +--let $conid = `SELECT CONNECTION_ID()` +SET debug_sync='sp_head_execute_before_loop SIGNAL ready WAIT_FOR cont'; +send INSERT INTO t6 VALUES (1), (2), (3); + +--connection server_1 +SET debug_sync='now WAIT_FOR ready'; +--replace_result $conid CONID +eval KILL QUERY $conid; +SET debug_sync='now SIGNAL cont'; + +--connection con1 +--error ER_QUERY_INTERRUPTED +--reap +SET binlog_format= @old_format; +SET debug_sync='RESET'; +--let $after_error_gtid_pos= `SELECT 
@@gtid_binlog_pos` + +--connection server_1 +SET debug_sync='RESET'; + + +--connection server_2 +--let $slave_sql_errno= 1317 +--source include/wait_for_slave_sql_error.inc +STOP SLAVE IO_THREAD; +--replace_result $after_error_gtid_pos AFTER_ERROR_GTID_POS +eval SET GLOBAL gtid_slave_pos= '$after_error_gtid_pos'; +--source include/start_slave.inc + +--connection server_1 +INSERT INTO t6 VALUES (4); +SELECT * FROM t6 ORDER BY a; +--save_master_pos + +--connection server_2 +--sync_with_master +SELECT * FROM t6 ORDER BY a; + + --connection server_2 --source include/stop_slave.inc SET GLOBAL slave_parallel_threads=@old_parallel_threads; @@ -1299,7 +1348,7 @@ SET DEBUG_SYNC= 'RESET'; --connection server_1 DROP function foo; -DROP TABLE t1,t2,t3,t4,t5; +DROP TABLE t1,t2,t3,t4,t5,t6; SET DEBUG_SYNC= 'RESET'; --source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_retry.test b/mysql-test/suite/rpl/t/rpl_parallel_retry.test new file mode 100644 index 00000000000..d3be6262cb0 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_parallel_retry.test @@ -0,0 +1,220 @@ +--source include/have_innodb.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc +--let $rpl_topology=1->2 +--source include/rpl_init.inc + +--echo *** Test retry of transactions that fail to replicate due to deadlock or similar temporary error. *** + +--connection server_1 +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t1 (a int PRIMARY KEY, b INT) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1,1); +--save_master_pos + +# Use a stored function to inject a debug_sync into the appropriate THD. +# The function does nothing on the master, and on the slave it injects the +# desired debug_sync action(s). 
+SET sql_log_bin=0; +--delimiter || +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) + RETURNS INT DETERMINISTIC + BEGIN + RETURN x; + END +|| +--delimiter ; +SET sql_log_bin=1; + +--connection server_2 +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads=5; +--source include/start_slave.inc +--sync_with_master +SET sql_log_bin=0; +--delimiter || +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) + RETURNS INT DETERMINISTIC + BEGIN + IF d1 != '' THEN + SET debug_sync = d1; + END IF; + IF d2 != '' THEN + SET debug_sync = d2; + END IF; + RETURN x; + END +|| +--delimiter ; +SET sql_log_bin=1; +--source include/stop_slave.inc + +--connection server_1 +SET gtid_seq_no = 100; +BEGIN; +INSERT INTO t1 VALUES (2,1); +UPDATE t1 SET b=b+1 WHERE a=1; +INSERT INTO t1 VALUES (3,1); +COMMIT; +SELECT * FROM t1 ORDER BY a; +--save_master_pos + +--connection server_2 +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_gtid_0_x_100"; +let $old_retry= query_get_value(SHOW STATUS LIKE 'Slave_retried_transactions', Value, 1); +--source include/start_slave.inc +--sync_with_master +SET GLOBAL debug_dbug=@old_dbug; +let $new_retry= query_get_value(SHOW STATUS LIKE 'Slave_retried_transactions', Value, 1); +--disable_query_log +eval SELECT $new_retry - $old_retry AS retries; +--enable_query_log + +SELECT * FROM t1 ORDER BY a; + + +--echo *** Test that double retry works when the first retry also fails with temp error *** +--source include/stop_slave.inc + +--connection server_1 +SET gtid_seq_no = 100; +SET @old_server_id = @@server_id; +SET server_id = 10; +BEGIN; +INSERT INTO t1 VALUES (4,1); +UPDATE t1 SET b=b+1 WHERE a=1; +INSERT INTO t1 VALUES (5,1); +INSERT INTO t1 VALUES (6,1); +COMMIT; +SET server_id = @old_server_id; +SELECT * FROM t1 ORDER BY a; +--save_master_pos + +--connection server_2 +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL 
debug_dbug="+d,rpl_parallel_simulate_temp_err_gtid_0_x_100,rpl_parallel_simulate_double_temp_err_gtid_0_x_100"; +let $old_retry= query_get_value(SHOW STATUS LIKE 'Slave_retried_transactions', Value, 1); +--source include/start_slave.inc +--sync_with_master +SET GLOBAL debug_dbug=@old_dbug; +let $new_retry= query_get_value(SHOW STATUS LIKE 'Slave_retried_transactions', Value, 1); +--disable_query_log +eval SELECT $new_retry - $old_retry AS retries; +--enable_query_log + +SELECT * FROM t1 ORDER BY a; + + +--echo *** Test too many retries, eventually causing failure. *** +--source include/stop_slave.inc + +--connection server_1 +SET gtid_seq_no = 100; +SET @old_server_id = @@server_id; +SET server_id = 11; +BEGIN; +INSERT INTO t1 VALUES (7,1); +UPDATE t1 SET b=b+1 WHERE a=1; +INSERT INTO t1 VALUES (8,1); +INSERT INTO t1 VALUES (9,1); +COMMIT; +SET server_id = @old_server_id; +SELECT * FROM t1 ORDER BY a; +--save_master_pos + +--connection server_2 +SET sql_log_bin=0; +CALL mtr.add_suppression("Slave worker thread retried transaction 10 time\\(s\\) in vain, giving up"); +CALL mtr.add_suppression("Slave: Deadlock found when trying to get lock; try restarting transaction"); +SET sql_log_bin=1; + +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_gtid_0_x_100,rpl_parallel_simulate_infinite_temp_err_gtid_0_x_100"; +let $old_retry= query_get_value(SHOW STATUS LIKE 'Slave_retried_transactions', Value, 1); +START SLAVE; +--let $slave_sql_errno= 1213 +--let $slave_timeout= 10 +--source include/wait_for_slave_sql_error.inc +SET GLOBAL debug_dbug=@old_dbug; +let $new_retry= query_get_value(SHOW STATUS LIKE 'Slave_retried_transactions', Value, 1); +--disable_query_log +eval SELECT $new_retry - $old_retry AS retries; +--enable_query_log + +SELECT * FROM t1 ORDER BY a; +STOP SLAVE IO_THREAD; +--source include/start_slave.inc +--sync_with_master +SELECT * FROM t1 ORDER BY a; + +--echo *** Test retry of event group that spans multiple relay 
log files. *** + +--connection server_1 +CREATE TABLE t2 (a int PRIMARY KEY, b BLOB) ENGINE=InnoDB; +INSERT INTO t2 VALUES (1,"Hulubullu"); +--save_master_pos + +--connection server_2 +--sync_with_master +--source include/stop_slave.inc +SET @old_max= @@GLOBAL.max_relay_log_size; +SET GLOBAL max_relay_log_size=4096; + +--connection server_1 +--let $big= `SELECT REPEAT("*", 5000)` +SET gtid_seq_no = 100; +SET @old_server_id = @@server_id; +SET server_id = 12; +BEGIN; +--disable_query_log +eval INSERT INTO t2 VALUES (2, CONCAT("Hello ", "$big")); +eval INSERT INTO t2 VALUES (3, CONCAT("Long data: ", "$big")); +--enable_query_log +INSERT INTO t1 VALUES (10, 4); +COMMIT; +SET server_id = @old_server_id; +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +SELECT a, LENGTH(b) FROM t2 ORDER BY a; +--save_master_pos + +--connection server_2 +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_gtid_0_x_100"; +let $old_retry= query_get_value(SHOW STATUS LIKE 'Slave_retried_transactions', Value, 1); +--source include/start_slave.inc +--sync_with_master +SET GLOBAL debug_dbug=@old_dbug; +let $new_retry= query_get_value(SHOW STATUS LIKE 'Slave_retried_transactions', Value, 1); +--disable_query_log +eval SELECT $new_retry - $old_retry AS retries; +--enable_query_log + +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +SELECT a, LENGTH(b) FROM t2 ORDER BY a; + +--connection server_1 +INSERT INTO t1 VALUES (11,11); +--disable_query_log +eval INSERT INTO t2 VALUES (4, "$big"); +--enable_query_log +--save_master_pos + +--connection server_2 +--sync_with_master +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +SELECT a, LENGTH(b) FROM t2 ORDER BY a; +SET GLOBAL max_relay_log_size=@old_max; + + +--connection server_2 +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +--source include/start_slave.inc + +--connection server_1 +DROP TABLE t1, t2; +DROP function foo; + +--source include/rpl_end.inc diff --git 
a/mysql-test/suite/rpl/t/rpl_read_new_relay_log_info.test b/mysql-test/suite/rpl/t/rpl_read_new_relay_log_info.test new file mode 100644 index 00000000000..1e2c8ce2d9f --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_read_new_relay_log_info.test @@ -0,0 +1,43 @@ +# ==== Purpose ==== +# +# - Verify that the post-WL#344 format of relay_log.info can be parsed. + +--source include/master-slave.inc + +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1); +DROP TABLE t1; +--sync_slave_with_master + +--echo ==== Check that we can understand the new format of relay-log.info ==== +--source include/stop_slave.inc + +RESET SLAVE; +--let $MYSQLD_DATADIR= `select @@datadir` + +# the new version of relay_log.info comes in two versions: with path +# separator '/' (most systems) and with path separator '\' (windows) +if ($SYSTEM_PATH_SEPARATOR != /) { + --let $file_suffix= -win +} +--copy_file $MYSQL_TEST_DIR/std_data/new-format-relay-log$file_suffix.info $MYSQLD_DATADIR/relay-log.info + +--echo # Read relay-log.info +START SLAVE IO_THREAD; +--source include/wait_for_slave_io_to_start.inc +--echo # Check that relay log coordinates are equal to those saved in new-format_relay-log.info +--let $master_file= query_get_value(SHOW SLAVE STATUS, Relay_Master_Log_File, 1) +--let $master_pos= query_get_value(SHOW SLAVE STATUS, Exec_Master_Log_Pos, 1) +--let $relay_log_file= query_get_value(SHOW SLAVE STATUS, Relay_Log_File, 1) +--let $relay_log_pos= query_get_value(SHOW SLAVE STATUS, Relay_Log_Pos, 1) +--echo $master_file= $master_file, $master_pos, $relay_log_file, $relay_log_pos +if (`SELECT "$master_file" != "" OR + "$master_pos" != "0" OR + "$relay_log_file" != "slave-relay-bin.000001" OR + "$relay_log_pos" != "4"`) { + --echo ERROR: log coordinates changed + --die log coordinates changed +} + +--let $rpl_only_running_threads= 1 +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_read_old_relay_log_info.test b/mysql-test/suite/rpl/t/rpl_read_old_relay_log_info.test new 
file mode 100644 index 00000000000..ce345445c08 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_read_old_relay_log_info.test @@ -0,0 +1,44 @@ +# ==== Purpose ==== +# +# - Verify that the pre-WL#344 format of relay_log.info can still be +# parsed. + +--source include/master-slave.inc + +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1); +DROP TABLE t1; +--sync_slave_with_master + +--echo ==== Check that we still understand the old format of relay-log.info ==== +--source include/stop_slave.inc + +RESET SLAVE; +--let $MYSQLD_DATADIR= `select @@datadir` + +# the old version of relay_log.info comes in two versions: with path +# separator '/' (most systems) and with path separator '\' (windows) +if ($SYSTEM_PATH_SEPARATOR != /) { + --let $file_suffix= -win +} +--copy_file $MYSQL_TEST_DIR/std_data/old-format-relay-log$file_suffix.info $MYSQLD_DATADIR/relay-log.info + +--echo # Read relay-log.info +START SLAVE IO_THREAD; +--source include/wait_for_slave_io_to_start.inc +--echo # Check that relay log coordinates are equal to those we saved in old-format_relay-log.info +--let $master_file= query_get_value(SHOW SLAVE STATUS, Relay_Master_Log_File, 1) +--let $master_pos= query_get_value(SHOW SLAVE STATUS, Exec_Master_Log_Pos, 1) +--let $relay_log_file= query_get_value(SHOW SLAVE STATUS, Relay_Log_File, 1) +--let $relay_log_pos= query_get_value(SHOW SLAVE STATUS, Relay_Log_Pos, 1) +--echo $master_file= $master_file, $master_pos, $relay_log_file, $relay_log_pos +if (`SELECT "$master_file" != "" OR + "$master_pos" != "0" OR + "$relay_log_file" != "slave-relay-bin.000001" OR + "$relay_log_pos" != "4"`) { + --echo ERROR: log coordinates changed + --die log coordinates changed +} + +--let $rpl_only_running_threads= 1 +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_semi_sync_uninstall_plugin.test b/mysql-test/suite/rpl/t/rpl_semi_sync_uninstall_plugin.test new file mode 100644 index 00000000000..4ee345e54ba --- /dev/null +++ 
b/mysql-test/suite/rpl/t/rpl_semi_sync_uninstall_plugin.test @@ -0,0 +1,138 @@ +############################################################################### +# Bug#17638477 UNINSTALL AND INSTALL SEMI-SYNC PLUGIN CAUSES SLAVES TO BREAK +# Problem: Uninstallation of Semi sync plugin should be blocked when it is +# in use. +# Test case: Uninstallation of semi sync should be allowed +# On Master: +# 1) When there is no dump thread +# 2) When there are no semi sync slaves (i.e., async replication). +# On Slave: +# 1) When there is no I/O thread +# 2) When there are no semi sync enabled I/O thread (i.e.,async replication). +############################################################################### + +--source include/have_semisync_plugin.inc +--source include/not_embedded.inc +--source include/have_binlog_format_statement.inc +--source include/master-slave.inc + +call mtr.add_suppression("Read semi-sync reply network error"); +call mtr.add_suppression("Timeout waiting for reply of binlog"); + +############################################################################### +# Case 1: Uninstallation of semi sync plugins should be allowed when it is +# not in use i.e., when asynchronous replication is active. +############################################################################### +# Step 1.1: Install semi sync master plugin on master +INSTALL PLUGIN rpl_semi_sync_master SONAME 'semisync_master'; + +# Step 1.2: Install semi sync slave plugin on slave +--connection slave +--echo [connection slave] +INSTALL PLUGIN rpl_semi_sync_slave SONAME 'semisync_slave'; + +# Step 1.3: Uninstallation of semisync plugin on master and slave should be +# allowed at this state as there is no semi sync replication enabled between +# master and slave. +UNINSTALL PLUGIN rpl_semi_sync_slave; +--connection master +--echo [connection master] +UNINSTALL PLUGIN rpl_semi_sync_master; + +# Step 1.4: Check that replication is working fine at the end of the test case. 
+CREATE TABLE t1(i int); +INSERT INTO t1 values (1); +DROP TABLE t1; +--sync_slave_with_master +--echo [connection slave] + +############################################################################### +# Case 2: Uninstallation of semi sync plugins should be disallowed +# when it is in use i.e., when semi sync replication is active +############################################################################### +# Step 2.1: Install and enable semi sync replication between master and slave +--source include/install_semisync.inc + +# Step 2.2: Check that rpl_semi_sync_slave uninstallation on Slave is not +# possible at this state +--connection slave +--echo [connection slave] +UNINSTALL PLUGIN rpl_semi_sync_slave; +select plugin_name,plugin_status from information_schema.plugins where plugin_name like 'rpl_%'; + +# Step 2.3: Check that rpl_semi_sync_master uninstallation on Master is not +# possible at this state +--connection master +--echo [connection master] +UNINSTALL PLUGIN rpl_semi_sync_master; +select plugin_name,plugin_status from information_schema.plugins where plugin_name like 'rpl_%'; + +# Step 2.4: Check that replication is working fine at the end of the test case. +CREATE TABLE t1(i int); +INSERT INTO t1 values (2); +DROP TABLE t1; +--sync_slave_with_master +--echo [connection slave] + +# Step 2.5: Make sure rpl_semi_sync_master_status on Master and +# rpl_semi_sync_slave_staus on Slave are ON +show status like "Rpl_semi_sync_slave_status"; + +############################################################################### +# Case 3: Uninstallation of semi sync plugin should be disallowed when there +# are semi sync slaves even though rpl_semi_sync_master_enabled= OFF;. 
+############################################################################### +# Step 3.1: Disable semi sync on master +--connection master +--echo [connection master] +show status like "Rpl_semi_sync_master_status"; + +# Step 3.2: Check that still Rpl_semi_sync_master_clients is 1 +show status like "Rpl_semi_sync_master_clients"; + +# Step 3.3: Since Rpl_semi_sync_master_clients is 1, uninstallation of +# rpl_semi_sync_master should be disallowed. +select plugin_name,plugin_status from information_schema.plugins where plugin_name like 'rpl_%'; + +############################################################################### +# Case 4: Uninstallation of semi sync plugin should be allowed when it is not +# in use. Same as Case 1 but this case is to check the case after enabling and +# disabling semi sync replication. +############################################################################### + +# Step 4.1: Stop IO thread on slave. +--connection slave +--echo [connection slave] +--source include/stop_slave.inc + +# Step 4.2: Disable semi sync on slave. +select plugin_name,plugin_status from information_schema.plugins where plugin_name like 'rpl_%'; + +--connection master +--echo [connection master] +# Send something to the slave so that the master would notice that nobody's listening. +create table t2 (a int); drop table t2; +# and wait for plugin to be unloaded automatically +let $wait_condition=select count(*) = 0 from information_schema.plugins where plugin_name like 'rpl_%'; +--source include/wait_condition.inc + +--connection slave +--echo [connection slave] + +# Step 4.3: Start IO thread on slave. +--source include/start_slave.inc + +# Step 4.4: Uninstall semi sync plugin, it should be successful now. 
+select plugin_name,plugin_status from information_schema.plugins where plugin_name like 'rpl_%'; + +# Step 4.7: Check that replication is working fine at the end of the test case +--connection master +--echo [connection master] +CREATE TABLE t1(i int); +INSERT INTO t1 values (3); +DROP TABLE t1; +--sync_slave_with_master +--echo [connection slave] + +# Cleanup +source include/rpl_end.inc; diff --git a/mysql-test/suite/rpl/t/rpl_skip_incident-master.opt b/mysql-test/suite/rpl/t/rpl_skip_incident-master.opt new file mode 100644 index 00000000000..912801debc4 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_skip_incident-master.opt @@ -0,0 +1 @@ +--loose-debug=+d,incident_database_resync_on_replace diff --git a/mysql-test/suite/rpl/t/rpl_skip_incident-slave.opt b/mysql-test/suite/rpl/t/rpl_skip_incident-slave.opt new file mode 100644 index 00000000000..bc90d5ea6c1 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_skip_incident-slave.opt @@ -0,0 +1 @@ +--slave-skip-error=1590 diff --git a/mysql-test/suite/rpl/t/rpl_skip_incident.test b/mysql-test/suite/rpl/t/rpl_skip_incident.test new file mode 100644 index 00000000000..959fde9374e --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_skip_incident.test @@ -0,0 +1,28 @@ +--source include/master-slave.inc +--source include/have_debug.inc + +--echo **** On Master **** +CREATE TABLE t1 (a INT); + +INSERT INTO t1 VALUES (1),(2),(3); +SELECT * FROM t1; + +# This will generate an incident log event and store it in the binary +# log before the replace statement. +REPLACE INTO t1 VALUES (4); + +--sync_slave_with_master + +# Now, we should have inserted the row into the table and the slave +# should be running. We should also have rotated to a new binary log. 
+ +SELECT * FROM t1; +source include/check_slave_is_running.inc; + +connection master; + +--echo Should have two binary logs here +--source include/show_binary_logs.inc +DROP TABLE t1; +--sync_slave_with_master +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_sp.test b/mysql-test/suite/rpl/t/rpl_sp.test index 00815ab9d7e..c978a145a92 100644 --- a/mysql-test/suite/rpl/t/rpl_sp.test +++ b/mysql-test/suite/rpl/t/rpl_sp.test @@ -614,7 +614,7 @@ show function status like '%mysqltestbug36570%'; connection master; flush logs; let $MYSQLD_DATADIR= `select @@datadir`; ---replace_regex s/$MYSQL_TEST_DIR/MYSQL_TEST_DIR/ s/TIMESTAMP=[0-9]*/TIMESTAMP=t/ +--replace_regex /$MYSQL_TEST_DIR/MYSQL_TEST_DIR/ /TIMESTAMP=[0-9]*/TIMESTAMP=t/ --exec $MYSQL_BINLOG --short-form $MYSQLD_DATADIR/master-bin.000001 use test; drop procedure mysqltestbug36570_p1; diff --git a/mysql-test/suite/rpl/t/rpl_stop_slave.test b/mysql-test/suite/rpl/t/rpl_stop_slave.test index d9d7f39c321..340738f8cb2 100644 --- a/mysql-test/suite/rpl/t/rpl_stop_slave.test +++ b/mysql-test/suite/rpl/t/rpl_stop_slave.test @@ -74,14 +74,17 @@ CREATE TABLE t2 (c1 INT) ENGINE=MyISAM; INSERT INTO t1 VALUES(1, 1); sync_slave_with_master; +--source include/stop_slave.inc --source include/rpl_connection_master.inc +# make sure that there are no zombie threads +--source include/stop_dump_threads.inc let $debug_save= `SELECT @@GLOBAL.debug`; SET GLOBAL debug_dbug= '+d,dump_thread_wait_before_send_xid,*'; --source include/rpl_connection_slave.inc -source include/restart_slave_sql.inc; +--source include/start_slave.inc BEGIN; UPDATE t1 SET c2 = 2 WHERE c1 = 1; @@ -93,6 +96,10 @@ INSERT INTO t2 VALUES(1); UPDATE t1 SET c2 = 3 WHERE c1 = 1; COMMIT; +# wait for the dump thread reach the sync point +--let $wait_condition= select count(*)=1 from information_schema.processlist where state LIKE '%debug sync point%' and command='Binlog Dump' +--source include/wait_condition.inc + --source 
include/rpl_connection_slave1.inc let $show_statement= SHOW PROCESSLIST; let $field= Info; @@ -105,6 +112,7 @@ send STOP SLAVE; ROLLBACK; --source include/rpl_connection_master.inc + SET DEBUG_SYNC= 'now SIGNAL signal.continue'; SET DEBUG_SYNC= 'now WAIT_FOR signal.continued'; @@ -113,12 +121,25 @@ source include/wait_for_slave_to_stop.inc; --source include/rpl_connection_slave1.inc reap; -source include/start_slave.inc; +# Slave has stopped, thence lets make sure that +# we kill the zombie dump threads. Also, make +# sure that we disable the DBUG_EXECUTE_IF +# that would set the dump thread to wait --source include/rpl_connection_master.inc -DROP TABLE t1, t2; --disable_query_log eval SET GLOBAL debug_dbug= '$debug_save'; --enable_query_log +# make sure that there are no zombie threads +--source include/stop_dump_threads.inc + +--source include/rpl_connection_slave1.inc +# now the dump thread on the master will start +# from a clean slate, i.e. without the +# DBUG_EXECUTE_IF set +source include/start_slave.inc; + +--source include/rpl_connection_master.inc +DROP TABLE t1, t2; --source include/rpl_end.inc SET DEBUG_SYNC= 'RESET'; diff --git a/mysql-test/suite/rpl/t/rpl_table_options.test b/mysql-test/suite/rpl/t/rpl_table_options.test index 12ff1ca457b..3f52444a3c7 100644 --- a/mysql-test/suite/rpl/t/rpl_table_options.test +++ b/mysql-test/suite/rpl/t/rpl_table_options.test @@ -23,6 +23,8 @@ show create table t1; sync_slave_with_master; connection slave; show create table t1; +set sql_mode=ignore_bad_table_options; +show create table t1; connection master; drop table t1; diff --git a/mysql-test/suite/sys_vars/r/aria_pagecache_file_hash_size_basic.result b/mysql-test/suite/sys_vars/r/aria_pagecache_file_hash_size_basic.result new file mode 100644 index 00000000000..0bdd56c298f --- /dev/null +++ b/mysql-test/suite/sys_vars/r/aria_pagecache_file_hash_size_basic.result @@ -0,0 +1,21 @@ +select @@global.aria_pagecache_file_hash_size; 
+@@global.aria_pagecache_file_hash_size +512 +select @@session.aria_pagecache_file_hash_size; +ERROR HY000: Variable 'aria_pagecache_file_hash_size' is a GLOBAL variable +show global variables like 'aria_pagecache_file_hash_size'; +Variable_name Value +aria_pagecache_file_hash_size 512 +show session variables like 'aria_pagecache_file_hash_size'; +Variable_name Value +aria_pagecache_file_hash_size 512 +select * from information_schema.global_variables where variable_name='aria_pagecache_file_hash_size'; +VARIABLE_NAME VARIABLE_VALUE +ARIA_PAGECACHE_FILE_HASH_SIZE 512 +select * from information_schema.session_variables where variable_name='aria_pagecache_file_hash_size'; +VARIABLE_NAME VARIABLE_VALUE +ARIA_PAGECACHE_FILE_HASH_SIZE 512 +set global aria_pagecache_file_hash_size=200; +ERROR HY000: Variable 'aria_pagecache_file_hash_size' is a read only variable +set session aria_pagecache_file_hash_size=200; +ERROR HY000: Variable 'aria_pagecache_file_hash_size' is a read only variable diff --git a/mysql-test/suite/sys_vars/r/innodb_simulate_comp_failures_basic.result b/mysql-test/suite/sys_vars/r/innodb_simulate_comp_failures_basic.result new file mode 100644 index 00000000000..7a6c9ca2db6 --- /dev/null +++ b/mysql-test/suite/sys_vars/r/innodb_simulate_comp_failures_basic.result @@ -0,0 +1,77 @@ +SET @start_global_value = @@global.innodb_simulate_comp_failures; +SELECT @start_global_value; +@start_global_value +0 +Valid values are between 0 and 99 +select @@global.innodb_simulate_comp_failures between 0 and 99; +@@global.innodb_simulate_comp_failures between 0 and 99 +1 +select @@global.innodb_simulate_comp_failures; +@@global.innodb_simulate_comp_failures +0 +select @@session.innodb_simulate_comp_failures; +ERROR HY000: Variable 'innodb_simulate_comp_failures' is a GLOBAL variable +show global variables like 'innodb_simulate_comp_failures'; +Variable_name Value +innodb_simulate_comp_failures 0 +show session variables like 'innodb_simulate_comp_failures'; 
+Variable_name Value +innodb_simulate_comp_failures 0 +select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_SIMULATE_COMP_FAILURES 0 +select * from information_schema.session_variables where variable_name='innodb_simulate_comp_failures'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_SIMULATE_COMP_FAILURES 0 +set global innodb_simulate_comp_failures=10; +select @@global.innodb_simulate_comp_failures; +@@global.innodb_simulate_comp_failures +10 +select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_SIMULATE_COMP_FAILURES 10 +select * from information_schema.session_variables where variable_name='innodb_simulate_comp_failures'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_SIMULATE_COMP_FAILURES 10 +set session innodb_simulate_comp_failures=1; +ERROR HY000: Variable 'innodb_simulate_comp_failures' is a GLOBAL variable and should be set with SET GLOBAL +set global innodb_simulate_comp_failures=1.1; +ERROR 42000: Incorrect argument type to variable 'innodb_simulate_comp_failures' +set global innodb_simulate_comp_failures=1e1; +ERROR 42000: Incorrect argument type to variable 'innodb_simulate_comp_failures' +set global innodb_simulate_comp_failures="foo"; +ERROR 42000: Incorrect argument type to variable 'innodb_simulate_comp_failures' +set global innodb_simulate_comp_failures=-7; +Warnings: +Warning 1292 Truncated incorrect innodb_simulate_comp_failures value: '-7' +select @@global.innodb_simulate_comp_failures; +@@global.innodb_simulate_comp_failures +0 +select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_SIMULATE_COMP_FAILURES 0 +set global innodb_simulate_comp_failures=106; +Warnings: +Warning 1292 Truncated incorrect innodb_simulate_comp_failures value: '106' +select @@global.innodb_simulate_comp_failures; 
+@@global.innodb_simulate_comp_failures +99 +select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_SIMULATE_COMP_FAILURES 99 +set global innodb_simulate_comp_failures=0; +select @@global.innodb_simulate_comp_failures; +@@global.innodb_simulate_comp_failures +0 +set global innodb_simulate_comp_failures=99; +select @@global.innodb_simulate_comp_failures; +@@global.innodb_simulate_comp_failures +99 +set global innodb_simulate_comp_failures=DEFAULT; +select @@global.innodb_simulate_comp_failures; +@@global.innodb_simulate_comp_failures +0 +SET @@global.innodb_simulate_comp_failures = @start_global_value; +SELECT @@global.innodb_simulate_comp_failures; +@@global.innodb_simulate_comp_failures +0 diff --git a/mysql-test/suite/sys_vars/r/key_cache_file_hash_size_basic.result b/mysql-test/suite/sys_vars/r/key_cache_file_hash_size_basic.result new file mode 100644 index 00000000000..52ebfc98cdc --- /dev/null +++ b/mysql-test/suite/sys_vars/r/key_cache_file_hash_size_basic.result @@ -0,0 +1,114 @@ +SET @start_value = @@global.key_cache_file_hash_size; +SELECT @start_value; +@start_value +512 +'#--------------------FN_DYNVARS_056_01------------------------#' +SET @@global.key_cache_file_hash_size = DEFAULT; +SELECT @@global.key_cache_file_hash_size; +@@global.key_cache_file_hash_size +512 +'#---------------------FN_DYNVARS_056_02-------------------------#' +SET @@global.key_cache_file_hash_size = @start_value; +SELECT @@global.key_cache_file_hash_size = 300; +@@global.key_cache_file_hash_size = 300 +0 +'#--------------------FN_DYNVARS_056_03------------------------#' +SET @@global.key_cache_file_hash_size = 128; +SET @@global.key_cache_file_hash_size = 16384; +SELECT @@global.key_cache_file_hash_size; +@@global.key_cache_file_hash_size +16384 +'#--------------------FN_DYNVARS_056_04-------------------------#' +SET @@global.key_cache_file_hash_size = -1; +Warnings: +Warning 1292 Truncated 
incorrect key_cache_file_hash_size value: '-1' +SELECT @@global.key_cache_file_hash_size; +@@global.key_cache_file_hash_size +128 +SET @@global.key_cache_file_hash_size = 42949672951; +Warnings: +Warning 1292 Truncated incorrect key_cache_file_hash_size value: '42949672951' +SELECT @@global.key_cache_file_hash_size; +@@global.key_cache_file_hash_size +16384 +SET @@global.key_cache_file_hash_size = 10000.01; +ERROR 42000: Incorrect argument type to variable 'key_cache_file_hash_size' +SELECT @@global.key_cache_file_hash_size; +@@global.key_cache_file_hash_size +16384 +SET @@global.key_cache_file_hash_size = -1024; +Warnings: +Warning 1292 Truncated incorrect key_cache_file_hash_size value: '-1024' +SELECT @@global.key_cache_file_hash_size; +@@global.key_cache_file_hash_size +128 +SET @@global.key_cache_file_hash_size = 99; +Warnings: +Warning 1292 Truncated incorrect key_cache_file_hash_size value: '99' +SELECT @@global.key_cache_file_hash_size; +@@global.key_cache_file_hash_size +128 +SET @@global.key_cache_file_hash_size = ON; +ERROR 42000: Incorrect argument type to variable 'key_cache_file_hash_size' +SELECT @@global.key_cache_file_hash_size; +@@global.key_cache_file_hash_size +128 +SET @@global.key_cache_file_hash_size = 'test'; +ERROR 42000: Incorrect argument type to variable 'key_cache_file_hash_size' +SELECT @@global.key_cache_file_hash_size; +@@global.key_cache_file_hash_size +128 +'#-------------------FN_DYNVARS_056_05----------------------------#' +SET @@session.key_cache_file_hash_size = 0; +ERROR HY000: Variable 'key_cache_file_hash_size' is a GLOBAL variable and should be set with SET GLOBAL +SELECT @@session.key_cache_file_hash_size; +ERROR HY000: Variable 'key_cache_file_hash_size' is a GLOBAL variable +'#----------------------FN_DYNVARS_056_06------------------------#' +SELECT @@global.key_cache_file_hash_size = VARIABLE_VALUE +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='key_cache_file_hash_size'; 
+@@global.key_cache_file_hash_size = VARIABLE_VALUE +1 +SELECT @@key_cache_file_hash_size = VARIABLE_VALUE +FROM INFORMATION_SCHEMA.SESSION_VARIABLES +WHERE VARIABLE_NAME='key_cache_file_hash_size'; +@@key_cache_file_hash_size = VARIABLE_VALUE +1 +'#---------------------FN_DYNVARS_056_07----------------------#' +SET @@global.key_cache_file_hash_size = TRUE; +Warnings: +Warning 1292 Truncated incorrect key_cache_file_hash_size value: '1' +SELECT @@global.key_cache_file_hash_size; +@@global.key_cache_file_hash_size +128 +SET @@global.key_cache_file_hash_size = FALSE; +Warnings: +Warning 1292 Truncated incorrect key_cache_file_hash_size value: '0' +SELECT @@global.key_cache_file_hash_size; +@@global.key_cache_file_hash_size +128 +'#---------------------FN_DYNVARS_056_08----------------------#' +SET @@global.key_cache_file_hash_size = 150; +SELECT @@key_cache_file_hash_size = @@global.key_cache_file_hash_size; +@@key_cache_file_hash_size = @@global.key_cache_file_hash_size +1 +'#---------------------FN_DYNVARS_056_09----------------------#' +SET key_cache_file_hash_size = 8000; +ERROR HY000: Variable 'key_cache_file_hash_size' is a GLOBAL variable and should be set with SET GLOBAL +SELECT @@key_cache_file_hash_size; +@@key_cache_file_hash_size +150 +SET local.key_cache_file_hash_size = 10; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'key_cache_file_hash_size = 10' at line 1 +SELECT local.key_cache_file_hash_size; +ERROR 42S02: Unknown table 'local' in field list +SET global.key_cache_file_hash_size = 10; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'key_cache_file_hash_size = 10' at line 1 +SELECT global.key_cache_file_hash_size; +ERROR 42S02: Unknown table 'global' in field list +SELECT key_cache_file_hash_size = @@session.key_cache_file_hash_size; +ERROR 
42S22: Unknown column 'key_cache_file_hash_size' in 'field list' +SET @@global.key_cache_file_hash_size = @start_value; +SELECT @@global.key_cache_file_hash_size; +@@global.key_cache_file_hash_size +512 diff --git a/mysql-test/suite/sys_vars/r/timed_mutexes_basic.result b/mysql-test/suite/sys_vars/r/timed_mutexes_basic.result index 50a5285b0d7..8c295fe8063 100644 --- a/mysql-test/suite/sys_vars/r/timed_mutexes_basic.result +++ b/mysql-test/suite/sys_vars/r/timed_mutexes_basic.result @@ -4,7 +4,11 @@ SELECT @global_start_value; 0 '#--------------------FN_DYNVARS_177_01------------------------#' SET @@global.timed_mutexes = 1; +Warnings: +Warning 1287 '@@timed_mutexes' is deprecated and will be removed in a future release. SET @@global.timed_mutexes = DEFAULT; +Warnings: +Warning 1287 '@@timed_mutexes' is deprecated and will be removed in a future release. SELECT @@global.timed_mutexes; @@global.timed_mutexes 0 @@ -17,15 +21,21 @@ SELECT @@timed_mutexes; SELECT global.timed_mutexes; ERROR 42S02: Unknown table 'global' in field list SET global timed_mutexes = 1; +Warnings: +Warning 1287 '@@timed_mutexes' is deprecated and will be removed in a future release. SELECT @@global.timed_mutexes; @@global.timed_mutexes 1 '#--------------------FN_DYNVARS_177_03------------------------#' SET @@global.timed_mutexes = 0; +Warnings: +Warning 1287 '@@timed_mutexes' is deprecated and will be removed in a future release. SELECT @@global.timed_mutexes; @@global.timed_mutexes 0 SET @@global.timed_mutexes = 1; +Warnings: +Warning 1287 '@@timed_mutexes' is deprecated and will be removed in a future release. SELECT @@global.timed_mutexes; @@global.timed_mutexes 1 @@ -82,23 +92,33 @@ VARIABLE_VALUE ON '#---------------------FN_DYNVARS_177_08-------------------------#' SET @@global.timed_mutexes = OFF; +Warnings: +Warning 1287 '@@timed_mutexes' is deprecated and will be removed in a future release. 
SELECT @@global.timed_mutexes; @@global.timed_mutexes 0 SET @@global.timed_mutexes = ON; +Warnings: +Warning 1287 '@@timed_mutexes' is deprecated and will be removed in a future release. SELECT @@global.timed_mutexes; @@global.timed_mutexes 1 '#---------------------FN_DYNVARS_177_09----------------------#' SET @@global.timed_mutexes = TRUE; +Warnings: +Warning 1287 '@@timed_mutexes' is deprecated and will be removed in a future release. SELECT @@global.timed_mutexes; @@global.timed_mutexes 1 SET @@global.timed_mutexes = FALSE; +Warnings: +Warning 1287 '@@timed_mutexes' is deprecated and will be removed in a future release. SELECT @@global.timed_mutexes; @@global.timed_mutexes 0 SET @@global.timed_mutexes = @global_start_value; +Warnings: +Warning 1287 '@@timed_mutexes' is deprecated and will be removed in a future release. SELECT @@global.timed_mutexes; @@global.timed_mutexes 0 diff --git a/mysql-test/suite/sys_vars/t/aria_pagecache_file_hash_size_basic.test b/mysql-test/suite/sys_vars/t/aria_pagecache_file_hash_size_basic.test new file mode 100644 index 00000000000..8bedb498e2c --- /dev/null +++ b/mysql-test/suite/sys_vars/t/aria_pagecache_file_hash_size_basic.test @@ -0,0 +1,22 @@ +# ulong readonly + +--source include/have_maria.inc +# +# show the global and session values; +# +select @@global.aria_pagecache_file_hash_size; +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +select @@session.aria_pagecache_file_hash_size; +show global variables like 'aria_pagecache_file_hash_size'; +show session variables like 'aria_pagecache_file_hash_size'; +select * from information_schema.global_variables where variable_name='aria_pagecache_file_hash_size'; +select * from information_schema.session_variables where variable_name='aria_pagecache_file_hash_size'; + +# +# show that it's read-only +# +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +set global aria_pagecache_file_hash_size=200; +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +set session aria_pagecache_file_hash_size=200; + diff --git 
a/mysql-test/suite/sys_vars/t/innodb_simulate_comp_failures_basic.test b/mysql-test/suite/sys_vars/t/innodb_simulate_comp_failures_basic.test new file mode 100644 index 00000000000..97e69e3f324 --- /dev/null +++ b/mysql-test/suite/sys_vars/t/innodb_simulate_comp_failures_basic.test @@ -0,0 +1,64 @@ +--source include/have_innodb.inc + +SET @start_global_value = @@global.innodb_simulate_comp_failures; +SELECT @start_global_value; + +# +# exists as global only +# + +--echo Valid values are between 0 and 99 +select @@global.innodb_simulate_comp_failures between 0 and 99; +select @@global.innodb_simulate_comp_failures; + +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +select @@session.innodb_simulate_comp_failures; + +show global variables like 'innodb_simulate_comp_failures'; +show session variables like 'innodb_simulate_comp_failures'; +select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures'; +select * from information_schema.session_variables where variable_name='innodb_simulate_comp_failures'; + +# +# show that it's writable +# + +set global innodb_simulate_comp_failures=10; +select @@global.innodb_simulate_comp_failures; +select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures'; +select * from information_schema.session_variables where variable_name='innodb_simulate_comp_failures'; + +--error ER_GLOBAL_VARIABLE +set session innodb_simulate_comp_failures=1; + +# +# incorrect types +# + +--error ER_WRONG_TYPE_FOR_VAR +set global innodb_simulate_comp_failures=1.1; +--error ER_WRONG_TYPE_FOR_VAR +set global innodb_simulate_comp_failures=1e1; +--error ER_WRONG_TYPE_FOR_VAR +set global innodb_simulate_comp_failures="foo"; + +set global innodb_simulate_comp_failures=-7; +select @@global.innodb_simulate_comp_failures; +select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures'; +set global innodb_simulate_comp_failures=106; +select 
@@global.innodb_simulate_comp_failures; +select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures'; + +# +# min/max/DEFAULT values +# + +set global innodb_simulate_comp_failures=0; +select @@global.innodb_simulate_comp_failures; +set global innodb_simulate_comp_failures=99; +select @@global.innodb_simulate_comp_failures; +set global innodb_simulate_comp_failures=DEFAULT; +select @@global.innodb_simulate_comp_failures; + +SET @@global.innodb_simulate_comp_failures = @start_global_value; +SELECT @@global.innodb_simulate_comp_failures; diff --git a/mysql-test/suite/sys_vars/t/key_cache_file_hash_size_basic.test b/mysql-test/suite/sys_vars/t/key_cache_file_hash_size_basic.test new file mode 100644 index 00000000000..deebe708d3d --- /dev/null +++ b/mysql-test/suite/sys_vars/t/key_cache_file_hash_size_basic.test @@ -0,0 +1,168 @@ +################# mysql-test\t\key_cache_file_hash_size.test ################## +# # +# Variable Name: key_cache_file_hash_size # +# Scope: GLOBAL # +# Access Type: Dynamic # +# Data Type: numeric # +# Default Value: 300 # +# Range: 100-4294967295 # +# # +# # +# Creation Date: 2008-02-07 # +# Author: Salman # +# # +# Description: Test Cases of Dynamic System Variable key_cache_file_hash_size # +# that checks the behavior of this variable in the following ways# +# * Default Value # +# * Valid & Invalid values # +# * Scope & Access method # +# * Data Integrity # +# # +# Reference: http://dev.mysql.com/doc/refman/5.1/en/ # +# server-system-variables.html # +# # +############################################################################### + +--source include/load_sysvars.inc + +######################################################################## +# START OF key_cache_file_hash_size TESTS # +######################################################################## + + +############################################################################# +# Saving initial value of key_cache_file_hash_size in 
a temporary variable # +############################################################################# + +SET @start_value = @@global.key_cache_file_hash_size; +SELECT @start_value; + + +--echo '#--------------------FN_DYNVARS_056_01------------------------#' +################################################################################ +# Display the DEFAULT value of key_cache_file_hash_size # +################################################################################ + +SET @@global.key_cache_file_hash_size = DEFAULT; +SELECT @@global.key_cache_file_hash_size; + + +--echo '#---------------------FN_DYNVARS_056_02-------------------------#' +############################################### +# Verify default value of variable # +############################################### + +SET @@global.key_cache_file_hash_size = @start_value; +SELECT @@global.key_cache_file_hash_size = 300; + + +--echo '#--------------------FN_DYNVARS_056_03------------------------#' +############################################################################### +# Change the value of key_cache_file_hash_size to a valid value # +############################################################################### + +SET @@global.key_cache_file_hash_size = 128; +SET @@global.key_cache_file_hash_size = 16384; +SELECT @@global.key_cache_file_hash_size; + +--echo '#--------------------FN_DYNVARS_056_04-------------------------#' +########################################################################### +# Change the value of key_cache_file_hash_size to invalid value # +########################################################################### + +SET @@global.key_cache_file_hash_size = -1; +SELECT @@global.key_cache_file_hash_size; +SET @@global.key_cache_file_hash_size = 42949672951; +SELECT @@global.key_cache_file_hash_size; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.key_cache_file_hash_size = 10000.01; +SELECT @@global.key_cache_file_hash_size; +SET @@global.key_cache_file_hash_size = 
-1024; +SELECT @@global.key_cache_file_hash_size; +SET @@global.key_cache_file_hash_size = 99; +SELECT @@global.key_cache_file_hash_size; + +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.key_cache_file_hash_size = ON; +SELECT @@global.key_cache_file_hash_size; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.key_cache_file_hash_size = 'test'; +SELECT @@global.key_cache_file_hash_size; + + +--echo '#-------------------FN_DYNVARS_056_05----------------------------#' +########################################################################### +# Test if accessing session key_cache_file_hash_size gives error # +########################################################################### + +--Error ER_GLOBAL_VARIABLE +SET @@session.key_cache_file_hash_size = 0; +--Error ER_INCORRECT_GLOBAL_LOCAL_VAR +SELECT @@session.key_cache_file_hash_size; + + +--echo '#----------------------FN_DYNVARS_056_06------------------------#' +############################################################################## +# Check if the value in GLOBAL & SESSION Tables matches values in variable # +############################################################################## + +SELECT @@global.key_cache_file_hash_size = VARIABLE_VALUE +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='key_cache_file_hash_size'; + +SELECT @@key_cache_file_hash_size = VARIABLE_VALUE +FROM INFORMATION_SCHEMA.SESSION_VARIABLES +WHERE VARIABLE_NAME='key_cache_file_hash_size'; + + +--echo '#---------------------FN_DYNVARS_056_07----------------------#' +################################################################### +# Check if TRUE and FALSE values can be used on variable # +################################################################### + +SET @@global.key_cache_file_hash_size = TRUE; +SELECT @@global.key_cache_file_hash_size; +SET @@global.key_cache_file_hash_size = FALSE; +SELECT @@global.key_cache_file_hash_size; + + +--echo '#---------------------FN_DYNVARS_056_08----------------------#' 
+######################################################################################################## +# Check if accessing variable with SESSION,LOCAL and without SCOPE points to same session variable # +######################################################################################################## + +SET @@global.key_cache_file_hash_size = 150; +SELECT @@key_cache_file_hash_size = @@global.key_cache_file_hash_size; + + +--echo '#---------------------FN_DYNVARS_056_09----------------------#' +########################################################################## ####### +# Check if key_cache_file_hash_size can be accessed with and without @@ sign # +################################################################################## + +--Error ER_GLOBAL_VARIABLE +SET key_cache_file_hash_size = 8000; +SELECT @@key_cache_file_hash_size; +--Error ER_PARSE_ERROR +SET local.key_cache_file_hash_size = 10; +--Error ER_UNKNOWN_TABLE +SELECT local.key_cache_file_hash_size; +--Error ER_PARSE_ERROR +SET global.key_cache_file_hash_size = 10; +--Error ER_UNKNOWN_TABLE +SELECT global.key_cache_file_hash_size; +--Error ER_BAD_FIELD_ERROR +SELECT key_cache_file_hash_size = @@session.key_cache_file_hash_size; + + +############################## +# Restore initial value # +############################## + +SET @@global.key_cache_file_hash_size = @start_value; +SELECT @@global.key_cache_file_hash_size; + + +######################################################################## +# END OF key_cache_file_hash_size TESTS # +######################################################################## diff --git a/mysql-test/suite/sys_vars/t/report_port_basic.test b/mysql-test/suite/sys_vars/t/report_port_basic.test index 903289230bd..dfe64dd4e8d 100644 --- a/mysql-test/suite/sys_vars/t/report_port_basic.test +++ b/mysql-test/suite/sys_vars/t/report_port_basic.test @@ -2,7 +2,7 @@ # # only global # ---replace_regex s/[0-9]+/DEFAULT_MASTER_PORT/ +--replace_regex 
/[0-9]+/DEFAULT_MASTER_PORT/ select @@global.report_port; --error ER_INCORRECT_GLOBAL_LOCAL_VAR select @@session.report_port; diff --git a/mysql-test/t/ctype_ucs.test b/mysql-test/t/ctype_ucs.test index 09294b60a04..3040c5b5392 100644 --- a/mysql-test/t/ctype_ucs.test +++ b/mysql-test/t/ctype_ucs.test @@ -796,6 +796,23 @@ DROP TABLE t1; --echo # +--echo # MDEV-5745 analyze MySQL fix for bug#12368495 +--echo # +SELECT CHAR_LENGTH(TRIM(LEADING 0x000000 FROM _ucs2 0x0061)); +SELECT CHAR_LENGTH(TRIM(LEADING 0x0001 FROM _ucs2 0x0061)); +SELECT CHAR_LENGTH(TRIM(LEADING 0x00 FROM _ucs2 0x0061)); + +SELECT CHAR_LENGTH(TRIM(TRAILING 0x000000 FROM _ucs2 0x0061)); +SELECT CHAR_LENGTH(TRIM(TRAILING 0x0001 FROM _ucs2 0x0061)); +SELECT CHAR_LENGTH(TRIM(TRAILING 0x61 FROM _ucs2 0x0061)); + +SELECT CHAR_LENGTH(TRIM(BOTH 0x000000 FROM _ucs2 0x0061)); +SELECT CHAR_LENGTH(TRIM(BOTH 0x0001 FROM _ucs2 0x0061)); +SELECT CHAR_LENGTH(TRIM(BOTH 0x61 FROM _ucs2 0x0061)); +SELECT CHAR_LENGTH(TRIM(BOTH 0x00 FROM _ucs2 0x0061)); + + +--echo # --echo # End of 5.5 tests --echo # diff --git a/mysql-test/t/ctype_utf32.test b/mysql-test/t/ctype_utf32.test index 2fbe452a716..b4ed48d07a5 100644 --- a/mysql-test/t/ctype_utf32.test +++ b/mysql-test/t/ctype_utf32.test @@ -873,6 +873,22 @@ ORDER BY l DESC; SELECT '2010-10-10 10:10:10' + INTERVAL GeometryType(GeomFromText('POINT(1 1)')) hour_second; --echo # +--echo # MDEV-5745 analyze MySQL fix for bug#12368495 +--echo # +SELECT CHAR_LENGTH(TRIM(LEADING 0x0000000000 FROM _utf32 0x00000061)); +SELECT CHAR_LENGTH(TRIM(LEADING 0x0001 FROM _utf32 0x00000061)); +SELECT CHAR_LENGTH(TRIM(LEADING 0x00 FROM _utf32 0x00000061)); + +SELECT CHAR_LENGTH(TRIM(TRAILING 0x0000000000 FROM _utf32 0x00000061)); +SELECT CHAR_LENGTH(TRIM(TRAILING 0x0001 FROM _utf32 0x00000061)); +SELECT CHAR_LENGTH(TRIM(TRAILING 0x61 FROM _utf32 0x00000061)); + +SELECT CHAR_LENGTH(TRIM(BOTH 0x0000000000 FROM _utf32 0x00000061)); +SELECT CHAR_LENGTH(TRIM(BOTH 0x0001 FROM _utf32 0x00000061)); 
+SELECT CHAR_LENGTH(TRIM(BOTH 0x61 FROM _utf32 0x00000061)); +SELECT CHAR_LENGTH(TRIM(BOTH 0x00 FROM _utf32 0x00000061)); + +--echo # --echo # End of 5.5 tests --echo # diff --git a/mysql-test/t/features.test b/mysql-test/t/features.test index cdfc9413da5..f2ac5a5bba6 100644 --- a/mysql-test/t/features.test +++ b/mysql-test/t/features.test @@ -6,6 +6,8 @@ drop table if exists t1; --enable_warnings +flush status; + show status like "feature%"; --echo # @@ -109,3 +111,20 @@ select updatexml('<div><div><span>1</span><span>2</span></div></div>', '/','<tr><td>1</td><td>2</td></tr>') as upd1; --replace_result 4 2 show status like "feature_xml"; + + +--echo # +--echo # Feature delayed_keys +--echo # + +create table t1 (a int, key(a)) engine=myisam delay_key_write=1; +insert into t1 values(1); +insert into t1 values(2); +drop table t1; + +create table t1 (a int, key(a)) engine=aria delay_key_write=1; +insert into t1 values(1); +insert into t1 values(2); +drop table t1; + +show status like "feature_delay_key_write"; diff --git a/mysql-test/t/func_str.test b/mysql-test/t/func_str.test index 61a237cb447..f04fda55608 100644 --- a/mysql-test/t/func_str.test +++ b/mysql-test/t/func_str.test @@ -1595,6 +1595,11 @@ call foo('(( 00000000 ++ 00000000 ))'); drop procedure foo; drop table t1,t2; +# +# Bug#18786138 SHA/MD5 HASHING FUNCTIONS DIE WITH "FILENAME" CHARACTER SET +# +select md5(_filename "a"), sha(_filename "a"); + --echo # --echo # End of 5.5 tests --echo # diff --git a/mysql-test/t/func_time.test b/mysql-test/t/func_time.test index 6bea1aab392..a3f488a8d1e 100644 --- a/mysql-test/t/func_time.test +++ b/mysql-test/t/func_time.test @@ -1243,13 +1243,47 @@ CREATE TABLE t1 AS SELECT SHOW COLUMNS FROM t1; DROP TABLE t1; +CREATE TABLE t1 (a DATE) ENGINE=MyISAM; +INSERT INTO t1 VALUES ('2005-05-04'),('2000-02-23'); +SELECT * FROM t1 GROUP BY FROM_UNIXTIME(concat(a,'10'))*1; +SELECT * FROM t1 GROUP BY (-FROM_UNIXTIME(concat(a,'10')))*1; +SELECT * FROM t1 GROUP BY 
(-FROM_UNIXTIME(concat(a,'10'))); +SELECT * FROM t1 GROUP BY ABS(FROM_UNIXTIME(concat(a,'10'))); +SELECT * FROM t1 GROUP BY @a:=(FROM_UNIXTIME(concat(a,'10'))*1); + +DROP TABLE t1; + +SET TIME_ZONE='+02:00'; + +--echo # +--echo # MDEV-6302 Wrong result set when using GROUP BY FROM_UNIXTIME(...)+0 +--echo # +CREATE TABLE t1 (a DATE); +INSERT INTO t1 VALUES ('2005-05-04'),('2000-02-23'); +SELECT a, FROM_UNIXTIME(CONCAT(a,'10')) AS f1, FROM_UNIXTIME(CONCAT(a,'10'))+0 AS f2 FROM t1; +SELECT * FROM t1 GROUP BY FROM_UNIXTIME(CONCAT(a,'10'))+0; +DROP TABLE t1; + +CREATE TABLE t1 (a DATE) ENGINE=MyISAM; +INSERT INTO t1 VALUES ('2005-05-04'),('2000-02-23'); +SELECT * FROM t1 GROUP BY FROM_UNIXTIME(concat(a,'10'))/1; +DROP TABLE t1; + +CREATE TABLE t1 (a DATE); +INSERT INTO t1 VALUES ('2005-05-04'); +SELECT CONCAT(FROM_UNIXTIME(CONCAT(a,'10')) MOD FROM_UNIXTIME(CONCAT(a,'10'))) AS f2 FROM t1; +SELECT CHAR_LENGTH(CONCAT(FROM_UNIXTIME(CONCAT(a,'10')) MOD FROM_UNIXTIME(CONCAT(a,'10')))) AS f2 FROM t1; +CREATE TABLE t2 AS SELECT CONCAT(FROM_UNIXTIME(CONCAT(a,'10')) MOD FROM_UNIXTIME(CONCAT(a,'10'))) AS f2 FROM t1; +SHOW CREATE TABLE t2; +SELECT * FROM t2; +DROP TABLE t1,t2; + --echo # --echo # MDEV-4635 Crash in UNIX_TIMESTAMP(STR_TO_DATE('2020','%Y')) --echo # -SET TIME_ZONE='+02:00'; SELECT UNIX_TIMESTAMP(STR_TO_DATE('2020','%Y')); -SET TIME_ZONE=DEFAULT; +SET TIME_ZONE=DEFAULT; --echo # --echo # MDEV-4863 COALESCE(time_or_datetime) returns wrong results in numeric context @@ -1589,3 +1623,11 @@ SELECT IFNULL(TIME'10:20:30',DATE'2001-01-01'); SELECT CASE WHEN 1 THEN TIME'10:20:30' ELSE DATE'2001-01-01' END; SELECT COALESCE(TIME'10:20:30',DATE'2001-01-01'); SET timestamp=DEFAULT; + +--echo # +--echo # MDEV-5750 Assertion `ltime->year == 0' fails on a query with EXTRACT DAY_MINUTE and TIME column +--echo # +CREATE TABLE t1 ( d DATE, t TIME ); +INSERT INTO t1 VALUES ('2008-12-05','22:34:09'),('2005-03-27','14:26:02'); +SELECT EXTRACT(DAY_MINUTE FROM GREATEST(t,d)), GREATEST(t,d) 
FROM t1; +DROP TABLE t1; diff --git a/mysql-test/t/gis-debug.test b/mysql-test/t/gis-debug.test new file mode 100644 index 00000000000..a0647a2c9f4 --- /dev/null +++ b/mysql-test/t/gis-debug.test @@ -0,0 +1,6 @@ +--source include/have_geometry.inc +--source include/have_debug.inc + +SET @tmp=ST_GIS_DEBUG(1); + +--source include/gis_debug.inc diff --git a/mysql-test/t/gis-precise.test b/mysql-test/t/gis-precise.test index 0c6410b5a75..c6cf42e86e4 100644 --- a/mysql-test/t/gis-precise.test +++ b/mysql-test/t/gis-precise.test @@ -69,12 +69,15 @@ select astext(ST_Intersection(GeomFromText('LINESTRING(0 0, 50 45, 40 50, 0 0)') select astext(ST_Intersection(GeomFromText('LINESTRING(0 0, 50 45, 40 50)'), GeomFromText('LINESTRING(50 5, 55 10, 0 45)'))); select astext(ST_Intersection(GeomFromText('POLYGON((0 0, 50 45, 40 50, 0 0))'), GeomFromText('POINT(20 20)'))); select astext(ST_Intersection(GeomFromText('POLYGON((0 0, 50 45, 40 50, 0 0))'), GeomFromText('LINESTRING(-10 -10, 200 200)'))); +--replace_result 7.999999999999999 8 select astext(ST_Intersection(GeomFromText('POLYGON((0 0, 50 45, 40 50, 0 0))'), GeomFromText('LINESTRING(-10 -10, 200 200, 199 201, -11 -9)'))); +--replace_result 7.999999999999999 8 select astext(ST_UNION(GeomFromText('POLYGON((0 0, 50 45, 40 50, 0 0))'), GeomFromText('LINESTRING(-10 -10, 200 200, 199 201, -11 -9)'))); select astext(ST_intersection(geomfromtext('polygon((0 0, 1 0, 0 1, 0 0))'), geomfromtext('polygon((0 0, 1 1, 0 2, 0 0))'))); select astext(ST_symdifference(geomfromtext('polygon((0 0, 1 0, 0 1, 0 0))'), geomfromtext('polygon((0 0, 1 1, 0 2, 0 0))'))); +--replace_result 7.999999999999999 8 select astext(ST_UNION(GeomFromText('POLYGON((0 0, 50 45, 40 50, 0 0))'), GeomFromText('LINESTRING(-10 -10, 200 200, 199 201, -11 -9)'))); # Buffer() tests @@ -83,13 +86,13 @@ select astext(ST_buffer(geometryfromtext('point(1 1)'), 1)); create table t1(geom geometrycollection); insert into t1 values (geomfromtext('POLYGON((0 0, 10 10, 0 8, 0 
0))')); insert into t1 values (geomfromtext('POLYGON((1 1, 10 10, 0 8, 1 1))')); -select astext(geom), area(geom),area(ST_buffer(geom,2)) from t1; -select astext(ST_buffer(geom,2)) from t1; +select astext(geom), area(geom),round(area(ST_buffer(geom,2)), 7) from t1; +select ST_NUMPOINTS(ST_EXTERIORRING(ST_buffer(geom,2))) from t1; set @geom=geomfromtext('LINESTRING(2 1, 4 2, 2 3, 2 5)'); set @buff=ST_buffer(@geom,1); --replace_result 40278744502097 40278744502096 -select astext(@buff); +select ST_NUMPOINTS(ST_EXTERIORRING(@buff)); # cleanup DROP TABLE t1; @@ -108,6 +111,32 @@ SELECT ST_Equals(PolyFromText('POLYGON((67 13, 67 18, 67 18, 59 18, 59 13, 67 13 SELECT ST_Equals(PolyFromText('POLYGON((67 13, 67 18, 67 18, 59 18, 59 13, 67 13) )'),PolyFromText('POLYGON((67 13, 67 18, 59 18, 59 13, 59 13, 67 13) )')) as result; SELECT ST_Equals(PointFromText('POINT (12 13)'),PointFromText('POINT (12 13)')) as result; + +--echo # +--echo # BUG#11755628/47429: INTERSECTION FUNCTION CRASHED MYSQLD +--echo # BUG#11759650/51979: UNION/INTERSECTION OF POLYGONS CRASHES MYSQL +--echo # + +SELECT ASTEXT(ST_UNION(GEOMFROMTEXT('POLYGON((525000 183300,525400 +183300,525400 18370, 525000 183700,525000 183300))'), +geomfromtext('POLYGON((525298.67 183511.53,525296.57 +183510.39,525296.42 183510.31,525289.11 183506.62,525283.17 +183503.47,525280.98 183502.26,525278.63 183500.97,525278.39 +183500.84,525276.79 183500,525260.7 183491.55,525263.95 +183484.75,525265.58 183481.95,525278.97 183488.73,525276.5 +183493.45,525275.5 183495.7,525280.35 183498.2,525282.3 +183499.1,525282.2 183499.3,525283.55 183500,525301.75 +183509.35,525304.45 183504.25,525307.85 183504.95,525304.5 +183510.83,525302.81 183513.8,525298.67 183511.53),(525275.06 +183489.89,525272.06 183488.37,525268.94 183494.51,525271.94 +183496.03,525275.06 183489.89),(525263.26 183491.55,525266.15 +183493.04,525269.88 183485.82,525266.99 183484.33,525263.26 +183491.55))'))) st_u; + +SET 
@a=0x0000000001030000000200000005000000000000000000000000000000000000000000000000002440000000000000000000000000000024400000000000002440000000000000000000000000000024400000000000000000000000000000000000000000000000000000F03F000000000000F03F0000000000000040000000000000F03F00000000000000400000000000000040000000000000F03F0000000000000040000000000000F03F000000000000F03F; +SELECT ASTEXT(TOUCHES(@a, GEOMFROMTEXT('point(0 0)'))) t; + + # bug #801243 Assertion `(0)' failed in Gis_geometry_collection::init_from_opresult on ST_UNION SELECT astext(ST_UNION ( @@ -135,11 +164,10 @@ SELECT ASTEXT(ST_INTERSECTION( #bug 804324 Assertion 0 in Gcalc_scan_iterator::pop_suitable_intersection ---replace_result 61538461538462 61538461538461 -SELECT ASTEXT(ST_UNION( +SELECT ROUND(ST_LENGTH(ST_UNION( MULTILINESTRINGFROMTEXT('MULTILINESTRING((6 2,4 0,3 5,3 6,4 3,6 4,3 9,0 7,3 7,8 4,2 9,5 0), (8 2,1 3,9 0,4 4))'), - MULTILINESTRINGFROMTEXT('MULTILINESTRING((2 5,6 7,9 7,5 2,1 6,3 6))'))); + MULTILINESTRINGFROMTEXT('MULTILINESTRING((2 5,6 7,9 7,5 2,1 6,3 6))'))), 7); SELECT ST_NUMGEOMETRIES((ST_UNION(ST_UNION( MULTILINESTRINGFROMTEXT('MULTILINESTRING((2 0,4 2,0 2,1 5,0 3,7 0,8 5,5 8), @@ -219,6 +247,7 @@ SELECT AsText(ST_UNION(POLYGONFROMTEXT('POLYGON((12 9, 3 6, 3 0, 12 9))'), POLYG #bug 841622 Assertion `t->rp->type == Gcalc_function::shape_line' failed in Gcalc_operation_reducer::end_line in maria-5.3-gis +--replace_result 276 278 SELECT ST_NUMPOINTS(ST_EXTERIORRING(ST_BUFFER(ST_UNION( MULTILINESTRINGFROMTEXT('MULTILINESTRING((3 4, 2 5, 7 6, 1 8),(0 0 ,1 6 ,0 1, 8 9, 2 4, 6 1, 3 5, 4 8), (9 3, 5 4, 1 8, 4 2, 5 8, 3 0))' ) , MULTILINESTRINGFROMTEXT('MULTILINESTRING((3 4, 3 1, 2 7, 4 2, 6 2, 1 5))') @@ -313,8 +342,8 @@ SELECT ST_WITHIN( MULTIPOINTFROMTEXT(' MULTIPOINT( 2 9 , 2 9 , 4 9 , 9 1 ) ') , SELECT ST_INTERSECTS( GeomFromText('MULTILINESTRING( ( 4030 3045 , 3149 2461 , 3004 3831 , 3775 2976 ) )') , GeomFromText('LINESTRING(3058.41 3187.91,3081.52 3153.19,3042.99 3127.57,3019.89 
3162.29,3039.07 3175.05,3039.07 3175.05,3058.41 3187.91,3081.52 3153.19,3042.99 3127.57,3019.89 3162.29)') ); -#bug 977201 ST_BUFFER fails with the negative D -select ASTEXT(ST_BUFFER(ST_GEOMCOLLFROMTEXT(' GEOMETRYCOLLECTION(LINESTRING(100 100, 31 10, 77 80), POLYGON((0 0,4 7,1 1,0 0)), POINT(20 20))'), -3)); +#bug 977201 ST_BUFFER fails with the negative D. TODO - check the result deeper. +# select ASTEXT(ST_BUFFER(ST_GEOMCOLLFROMTEXT(' GEOMETRYCOLLECTION(LINESTRING(100 100, 31 10, 77 80), POLYGON((0 0,4 7,1 1,0 0)), POINT(20 20))'), -3)); #bug 986977 Assertion `!cur_p->event' failed in Gcalc_scan_iterator::arrange_event(int, int) SELECT ST_NUMPOINTS(ST_EXTERIORRING(ST_BUFFER( POLYGONFROMTEXT( 'POLYGON( ( 0.0 -3.0, @@ -328,3 +357,5 @@ SELECT ST_NUMPOINTS(ST_EXTERIORRING(ST_BUFFER( POLYGONFROMTEXT( 'POLYGON( ( 0.0 # MDEV-5615 crash in Gcalc_function::add_operation select astext(buffer(st_linestringfromwkb(linestring(point(-1,1), point(-1,-2))),-1)); +--source include/gis_debug.inc + diff --git a/mysql-test/t/gis.test b/mysql-test/t/gis.test index c38706959e4..d20e4c1711e 100644 --- a/mysql-test/t/gis.test +++ b/mysql-test/t/gis.test @@ -541,6 +541,18 @@ insert into t1 values(default); drop table t1; # +# Bug #27300: create view with geometry functions lost columns types +# +CREATE TABLE t1 (a GEOMETRY); +CREATE VIEW v1 AS SELECT GeomFromwkb(ASBINARY(a)) FROM t1; +CREATE VIEW v2 AS SELECT a FROM t1; +DESCRIBE v1; +DESCRIBE v2; + +DROP VIEW v1,v2; +DROP TABLE t1; + +# # Bug#24563: MBROverlaps does not seem to function propertly # Bug#54888: MBROverlaps missing in 5.1? # diff --git a/mysql-test/t/group_min_max.test b/mysql-test/t/group_min_max.test index f1a287054ca..c809401bbf8 100644 --- a/mysql-test/t/group_min_max.test +++ b/mysql-test/t/group_min_max.test @@ -1418,6 +1418,31 @@ drop table t1; --echo # End of test#50539. --echo # +--echo # Bug#17217128 - BAD INTERACTION BETWEEN MIN/MAX AND +--echo # "HAVING SUM(DISTINCT)": WRONG RESULTS. 
+--echo # + +CREATE TABLE t (a INT, b INT, KEY(a,b)); +INSERT INTO t VALUES (1,1), (2,2), (3,3), (4,4), (1,0), (3,2), (4,5); +ANALYZE TABLE t; + +SELECT a, SUM(DISTINCT a), MIN(b) FROM t GROUP BY a; +EXPLAIN SELECT a, SUM(DISTINCT a), MIN(b) FROM t GROUP BY a; + +SELECT a, SUM(DISTINCT a), MAX(b) FROM t GROUP BY a; +EXPLAIN SELECT a, SUM(DISTINCT a), MAX(b) FROM t GROUP BY a; + +SELECT a, MAX(b) FROM t GROUP BY a HAVING SUM(DISTINCT a); +EXPLAIN SELECT a, MAX(b) FROM t GROUP BY a HAVING SUM(DISTINCT a); + +SELECT SUM(DISTINCT a), MIN(b), MAX(b) FROM t; +EXPLAIN SELECT SUM(DISTINCT a), MIN(b), MAX(b) FROM t; + +SELECT a, SUM(DISTINCT a), MIN(b), MAX(b) FROM t GROUP BY a; +EXPLAIN SELECT a, SUM(DISTINCT a), MIN(b), MAX(b) FROM t GROUP BY a; +DROP TABLE t; + +--echo # --echo # MDEV-4219 A simple select query returns random data (upstream bug#68473) --echo # diff --git a/mysql-test/t/group_min_max_innodb.test b/mysql-test/t/group_min_max_innodb.test index 7038eb2ff47..6967f847147 100644 --- a/mysql-test/t/group_min_max_innodb.test +++ b/mysql-test/t/group_min_max_innodb.test @@ -137,3 +137,96 @@ SELECT COUNT(DISTINCT a) FROM t1 WHERE b = 'b'; DROP TABLE t1; --echo End of 5.5 tests + +--echo # +--echo # Bug#17909656 - WRONG RESULTS FOR A SIMPLE QUERY WITH GROUP BY +--echo # + +CREATE TABLE t0 ( + i1 INTEGER NOT NULL +); + +INSERT INTO t0 VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10), + (11),(12),(13),(14),(15),(16),(17),(18),(19),(20), + (21),(22),(23),(24),(25),(26),(27),(28),(29),(30); + +CREATE TABLE t1 ( + c1 CHAR(1) NOT NULL, + i1 INTEGER NOT NULL, + i2 INTEGER NOT NULL, + UNIQUE KEY k1 (c1,i2) +) ENGINE=InnoDB; + +INSERT INTO t1 SELECT 'A',i1,i1 FROM t0; +INSERT INTO t1 SELECT 'B',i1,i1 FROM t0; +INSERT INTO t1 SELECT 'C',i1,i1 FROM t0; +INSERT INTO t1 SELECT 'D',i1,i1 FROM t0; +INSERT INTO t1 SELECT 'E',i1,i1 FROM t0; +INSERT INTO t1 SELECT 'F',i1,i1 FROM t0; + +CREATE TABLE t2 ( + c1 CHAR(1) NOT NULL, + i1 INTEGER NOT NULL, + i2 INTEGER NOT NULL, + UNIQUE KEY 
k2 (c1,i1,i2) +) ENGINE=InnoDB; + +INSERT INTO t2 SELECT 'A',i1,i1 FROM t0; +INSERT INTO t2 SELECT 'B',i1,i1 FROM t0; +INSERT INTO t2 SELECT 'C',i1,i1 FROM t0; +INSERT INTO t2 SELECT 'D',i1,i1 FROM t0; +INSERT INTO t2 SELECT 'E',i1,i1 FROM t0; +INSERT INTO t2 SELECT 'F',i1,i1 FROM t0; + +-- disable_result_log +ANALYZE TABLE t1; +ANALYZE TABLE t2; +-- enable_result_log + +let query= +SELECT c1, max(i2) FROM t1 WHERE (c1 = 'C' AND i2 = 17) OR ( c1 = 'F') +GROUP BY c1; +eval EXPLAIN $query; +eval $query; + +let query= +SELECT c1, max(i2) FROM t1 WHERE (c1 = 'C' OR ( c1 = 'F' AND i2 = 17)) +GROUP BY c1; +eval EXPLAIN $query; +eval $query; + +let query= +SELECT c1, max(i2) FROM t1 WHERE (c1 = 'C' OR c1 = 'F' ) AND ( i2 = 17 ) +GROUP BY c1; +eval EXPLAIN $query; +eval $query; + +let query= +SELECT c1, max(i2) FROM t1 +WHERE ((c1 = 'C' AND (i2 = 40 OR i2 = 30)) OR ( c1 = 'F' AND (i2 = 40 ))) +GROUP BY c1; +eval EXPLAIN $query; +eval $query; + +let query= +SELECT c1, i1, max(i2) FROM t2 +WHERE (c1 = 'C' OR ( c1 = 'F' AND i1 < 35)) AND ( i2 = 17 ) +GROUP BY c1,i1; +eval EXPLAIN $query; +eval $query; + +let query= +SELECT c1, i1, max(i2) FROM t2 +WHERE (((c1 = 'C' AND i1 < 40) OR ( c1 = 'F' AND i1 < 35)) AND ( i2 = 17 )) +GROUP BY c1,i1; +eval EXPLAIN $query; +eval $query; + +let query= +SELECT c1, i1, max(i2) FROM t2 +WHERE ((c1 = 'C' AND i1 < 40) OR ( c1 = 'F' AND i1 < 35) OR ( i2 = 17 )) +GROUP BY c1,i1; +eval EXPLAIN $query; +eval $query; + +DROP TABLE t0,t1,t2; diff --git a/mysql-test/t/huge_frm-6224.test b/mysql-test/t/huge_frm-6224.test new file mode 100644 index 00000000000..418722a7b51 --- /dev/null +++ b/mysql-test/t/huge_frm-6224.test @@ -0,0 +1,20 @@ +# +# MDEV-6224 Incorrect information in file when *.frm is > 256K +# +# verify that huge frms are rejected during creation, not on opening +# +--source include/have_partition.inc + +let $n=5646; +let $a=create table t1 (a int) engine=myisam partition by hash(a) partitions $n (; +dec $n; +while ($n) +{ + let $a=$a 
partition p01234567890123456789012345678901234567890123456789012345678$n,; + dec $n; +} + +--disable_query_log +--error ER_TABLE_DEFINITION_TOO_BIG +eval $a partition foo); + diff --git a/mysql-test/t/innodb_load_xa.opt b/mysql-test/t/innodb_load_xa.opt new file mode 100644 index 00000000000..4ff27e659ce --- /dev/null +++ b/mysql-test/t/innodb_load_xa.opt @@ -0,0 +1 @@ +--ignore-builtin-innodb --loose-innodb --log-bin diff --git a/mysql-test/t/innodb_load_xa.test b/mysql-test/t/innodb_load_xa.test new file mode 100644 index 00000000000..52862151b22 --- /dev/null +++ b/mysql-test/t/innodb_load_xa.test @@ -0,0 +1,18 @@ +# +# MDEV-6082 Assertion `0' fails in TC_LOG_DUMMY::log_and_order on DML after installing TokuDB at runtime on server with disabled InnoDB +# +--source include/not_embedded.inc + +if (!$HA_INNODB_SO) { + --skip Need InnoDB plugin +} +install plugin innodb soname 'ha_innodb'; +select engine,support,transactions,xa from information_schema.engines where engine='innodb'; +create table t1 (a int) engine=innodb; +start transaction; +insert t1 values (1); +insert t1 values (2); +commit; +--source include/show_binlog_events.inc +drop table t1; +uninstall plugin innodb; diff --git a/mysql-test/t/innodb_mysql_lock2.test b/mysql-test/t/innodb_mysql_lock2.test index b7259e771ae..640f9652462 100644 --- a/mysql-test/t/innodb_mysql_lock2.test +++ b/mysql-test/t/innodb_mysql_lock2.test @@ -440,15 +440,16 @@ let $wait_statement= $statement; --echo # 4.1 SELECT/SET with a stored function which does not --echo # modify data and uses SELECT in its turn. --echo # ---echo # In theory there is no need to take row locks on the table +--echo # There is no need to take row locks on the table --echo # being selected from in SF as the call to such function ---echo # won't get into the binary log. In practice, however, we ---echo # discover that fact too late in the process to be able to ---echo # affect the decision what locks should be taken. 
---echo # Hence, strong locks are taken in this case. +--echo # won't get into the binary log. +--echo # +--echo # However in practice innodb takes strong lock on tables +--echo # being selected from within SF, when SF is called from +--echo # non SELECT statements like 'set' statement below. let $statement= select f1(); let $wait_statement= select i from t1 where i = 1 into j; ---source include/check_shared_row_lock.inc +--source include/check_no_row_lock.inc let $statement= set @a:= f1(); let $wait_statement= select i from t1 where i = 1 into j; --source include/check_shared_row_lock.inc @@ -486,19 +487,21 @@ let $wait_statement= select i from t1 where i = 1 into k; --echo # modify data and reads a table through subselect --echo # in a control construct. --echo # ---echo # Again, in theory a call to this function won't get to the ---echo # binary log and thus no locking is needed. But in practice ---echo # we don't detect this fact early enough (get_lock_type_for_table()) ---echo # to avoid taking row locks. +--echo # Call to this function won't get to the +--echo # binary log and thus no locking is needed. +--echo # +--echo # However in practice innodb takes strong lock on tables +--echo # being selected from within SF, when SF is called from +--echo # non SELECT statements like 'set' statement below. 
let $statement= select f3(); let $wait_statement= $statement; ---source include/check_shared_row_lock.inc +--source include/check_no_row_lock.inc let $statement= set @a:= f3(); let $wait_statement= $statement; --source include/check_shared_row_lock.inc let $statement= select f4(); let $wait_statement= $statement; ---source include/check_shared_row_lock.inc +--source include/check_no_row_lock.inc let $statement= set @a:= f4(); let $wait_statement= $statement; --source include/check_shared_row_lock.inc @@ -539,19 +542,21 @@ let $wait_statement= insert into t2 values ((select i from t1 where i = 1) + 5); --echo # doesn't modify data and reads tables through --echo # a view. --echo # ---echo # Once again, in theory, calls to such functions won't ---echo # get into the binary log and thus don't need row ---echo # locks. But in practice this fact is discovered ---echo # too late to have any effect. +--echo # Calls to such functions won't get into +--echo # the binary log and thus don't need row locks. +--echo # +--echo # However in practice innodb takes strong lock on tables +--echo # being selected from within SF, when SF is called from +--echo # non SELECT statements like 'set' statement below. let $statement= select f6(); let $wait_statement= select i from v1 where i = 1 into k; ---source include/check_shared_row_lock.inc +--source include/check_no_row_lock.inc let $statement= set @a:= f6(); let $wait_statement= select i from v1 where i = 1 into k; --source include/check_shared_row_lock.inc let $statement= select f7(); let $wait_statement= select j from v2 where j = 1 into k; ---source include/check_shared_row_lock.inc +--source include/check_no_row_lock.inc let $statement= set @a:= f7(); let $wait_statement= select j from v2 where j = 1 into k; --source include/check_shared_row_lock.inc @@ -592,12 +597,11 @@ let $wait_statement= update v2 set j=j+10 where j=1; --echo # data and reads a table indirectly, by calling another --echo # function. 
--echo # ---echo # In theory, calls to such functions won't get into the binary ---echo # log and thus don't need to acquire row locks. But in practice ---echo # this fact is discovered too late to have any effect. +--echo # Calls to such functions won't get into the binary +--echo # log and thus don't need to acquire row locks. let $statement= select f10(); let $wait_statement= select i from t1 where i = 1 into j; ---source include/check_shared_row_lock.inc +--source include/check_no_row_lock.inc --echo # --echo # 4.11 INSERT which uses a stored function which doesn't modify @@ -676,12 +680,11 @@ let $wait_statement= select i from t1 where i = 1 into p; --echo # 5.3 SELECT that calls a function that doesn't modify data and --echo # uses a CALL statement that reads a table via SELECT. --echo # ---echo # In theory, calls to such functions won't get into the binary ---echo # log and thus don't need to acquire row locks. But in practice ---echo # this fact is discovered too late to have any effect. +--echo # Calls to such functions won't get into the binary +--echo # log and thus don't need to acquire row locks. 
let $statement= select f15(); let $wait_statement= select i from t1 where i = 1 into p; ---source include/check_shared_row_lock.inc +--source include/check_no_row_lock.inc --echo # --echo # 5.4 INSERT which calls function which doesn't modify data and diff --git a/mysql-test/t/ipv4_and_ipv6.opt b/mysql-test/t/ipv4_and_ipv6.opt new file mode 100644 index 00000000000..a22a7b100c8 --- /dev/null +++ b/mysql-test/t/ipv4_and_ipv6.opt @@ -0,0 +1 @@ +--skip-name-resolve --bind-address=* diff --git a/mysql-test/t/ipv4_and_ipv6.test b/mysql-test/t/ipv4_and_ipv6.test new file mode 100644 index 00000000000..19ab4a253cc --- /dev/null +++ b/mysql-test/t/ipv4_and_ipv6.test @@ -0,0 +1,13 @@ +--source include/check_ipv6.inc +--source include/not_embedded.inc + +echo =============Test of '::1' ========================================; +let $IPv6= ::1; +--source include/ipv6_clients.inc +--source include/ipv6.inc + +echo =============Test of '127.0.0.1' (IPv4) ===========================; +let $IPv6= 127.0.0.1; +--source include/ipv6_clients.inc +--source include/ipv6.inc + diff --git a/mysql-test/t/key_cache.test b/mysql-test/t/key_cache.test index 9098ca466b7..86e56a8301b 100644 --- a/mysql-test/t/key_cache.test +++ b/mysql-test/t/key_cache.test @@ -8,6 +8,7 @@ drop table if exists t1, t2, t3; SET @save_key_buffer_size=@@key_buffer_size; SET @save_key_cache_block_size=@@key_cache_block_size; SET @save_key_cache_segments=@@key_cache_segments; +SET @save_key_cache_file_hash_size=@@key_cache_file_hash_size; SELECT @@key_buffer_size, @@small.key_buffer_size; @@ -62,19 +63,19 @@ select @@keycache1.key_buffer_size; select @@keycache1.key_cache_block_size; select @@key_buffer_size; select @@key_cache_block_size; +select @@key_cache_file_hash_size; set global keycache1.key_buffer_size=1024*1024; +let org_key_blocks_unused=`select unused_blocks as unused from information_schema.key_caches where key_cache_name="default"`; +--disable_query_log +eval set 
@org_key_blocks_unused=$org_key_blocks_unused; +--enable_query_log + create table t1 (p int primary key, a char(10)) delay_key_write=1; create table t2 (p int primary key, i int, a char(10), key k1(i), key k2(a)); -show status like 'key_blocks_used'; - -# Following results differs on 64 and 32 bit systems because of different -# pointer sizes, which takes up different amount of space in key cache - ---replace_result 1812 KEY_BLOCKS_UNUSED 1793 KEY_BLOCKS_UNUSED 1674 KEY_BLOCKS_UNUSED 1818 KEY_BLOCKS_UNUSED 1824 KEY_BLOCKS_UNUSED -show status like 'key_blocks_unused'; +select @org_key_blocks_unused-unused_blocks as key_blocks_unused, used_blocks as key_blocks_used from information_schema.key_caches where key_cache_name="default"; insert into t1 values (1, 'qqqq'), (11, 'yyyy'); insert into t2 values (1, 1, 'qqqq'), (2, 1, 'pppp'), @@ -85,9 +86,7 @@ select * from t2; update t1 set p=2 where p=1; update t2 set i=2 where i=1; -show status like 'key_blocks_used'; ---replace_result 1808 KEY_BLOCKS_UNUSED 1789 KEY_BLOCKS_UNUSED 1670 KEY_BLOCKS_UNUSED 1814 KEY_BLOCKS_UNUSED 1820 KEY_BLOCKS_UNUSED -show status like 'key_blocks_unused'; +select @org_key_blocks_unused-unused_blocks as key_blocks_unused, used_blocks as key_blocks_used from information_schema.key_caches where key_cache_name="default"; cache index t1 key (`primary`) in keycache1; @@ -147,9 +146,7 @@ cache index t3 in keycache2; cache index t1,t2 in default; drop table t1,t2,t3; -show status like 'key_blocks_used'; ---replace_result 1812 KEY_BLOCKS_UNUSED 1793 KEY_BLOCKS_UNUSED 1674 KEY_BLOCKS_UNUSED 1818 KEY_BLOCKS_UNUSED 1824 KEY_BLOCKS_UNUSED -show status like 'key_blocks_unused'; +select @org_key_blocks_unused-unused_blocks as key_blocks_unused, used_blocks as key_blocks_used from information_schema.key_caches where key_cache_name="default"; create table t1 (a int primary key); cache index t1 in keycache2; @@ -304,7 +301,7 @@ select * from t2; update t1 set p=3 where p=1; update t2 set i=2 where i=1; 
---replace_result 1808 KEY_BLOCKS_UNUSED 1670 KEY_BLOCKS_UNUSED 1789 KEY_BLOCKS_UNUSED +--replace_result 1804 KEY_BLOCKS_UNUSED 1801 KEY_BLOCKS_UNUSED 1663 KEY_BLOCKS_UNUSED 1782 KEY_BLOCKS_UNUSED show status like 'key_%'; --replace_column 7 # select * from information_schema.key_caches where segment_number is null; @@ -336,7 +333,8 @@ select * from t2; update t1 set p=3 where p=1; update t2 set i=2 where i=1; ---replace_result 1808 KEY_BLOCKS_UNUSED 1670 KEY_BLOCKS_UNUSED 1788 KEY_BLOCKS_UNUSED + +--replace_result 1800 KEY_BLOCKS_UNUSED 1794 KEY_BLOCKS_UNUSED 1656 KEY_BLOCKS_UNUSED 1775 KEY_BLOCKS_UNUSED show status like 'key_%'; --replace_column 7 # select * from information_schema.key_caches where segment_number is null; @@ -361,7 +359,7 @@ select * from t2; update t1 set p=3 where p=1; update t2 set i=2 where i=1; ---replace_result 1808 KEY_BLOCKS_UNUSED 1670 KEY_BLOCKS_UNUSED 1789 KEY_BLOCKS_UNUSED +--replace_result 1804 KEY_BLOCKS_UNUSED 1801 KEY_BLOCKS_UNUSED 1663 KEY_BLOCKS_UNUSED 1782 KEY_BLOCKS_UNUSED show status like 'key_%'; --replace_column 7 # select * from information_schema.key_caches where segment_number is null; @@ -378,6 +376,7 @@ select * from information_schema.key_caches where segment_number is null; # Switch back to 2 segments set global key_buffer_size=32*1024; +set global key_cache_file_hash_size=128; select @@key_buffer_size; set global key_cache_segments=2; select @@key_cache_segments; @@ -536,5 +535,6 @@ set global keycache2.key_buffer_size=0; set global key_buffer_size=@save_key_buffer_size; set global key_cache_segments=@save_key_cache_segments; +set global key_cache_file_hash_size=@save_key_cache_file_hash_size; # End of 5.2 tests diff --git a/mysql-test/t/lock_sync.test b/mysql-test/t/lock_sync.test index d5ad7becd7d..f00080d917b 100644 --- a/mysql-test/t/lock_sync.test +++ b/mysql-test/t/lock_sync.test @@ -49,6 +49,7 @@ drop table if exists t0, t1, t2, t3, t4, t5; drop view if exists v1, v2; drop procedure if exists p1; drop 
procedure if exists p2; +drop procedure if exists p3; drop function if exists f1; drop function if exists f2; drop function if exists f3; @@ -64,6 +65,8 @@ drop function if exists f12; drop function if exists f13; drop function if exists f14; drop function if exists f15; +drop function if exists f16; +drop function if exists f17; --enable_warnings create table t1 (i int primary key); insert into t1 values (1), (2), (3), (4), (5); @@ -170,6 +173,26 @@ begin call p2(k); return k; end| +create function f16() returns int +begin + create temporary table if not exists temp1 (a int); + insert into temp1 select * from t1; + drop temporary table temp1; + return 1; +end| +create function f17() returns int +begin + declare j int; + select i from t1 where i = 1 into j; + call p3; + return 1; +end| +create procedure p3() +begin + create temporary table if not exists temp1 (a int); + insert into temp1 select * from t1; + drop temporary table temp1; +end| create trigger t4_bi before insert on t4 for each row begin declare k int; @@ -217,6 +240,7 @@ connection con1; --disable_result_log show create procedure p1; show create procedure p2; +show create procedure p3; show create function f1; show create function f2; show create function f3; @@ -232,6 +256,8 @@ show create function f12; show create function f13; show create function f14; show create function f15; +show create function f16; +show create function f17; --enable_result_log --echo # Switch back to connection 'default'. connection default; @@ -492,18 +518,15 @@ let $restore_table= t2; --echo # 4.1 SELECT/SET with a stored function which does not --echo # modify data and uses SELECT in its turn. --echo # ---echo # In theory there is no need to take strong locks on the table +--echo # There is no need to take strong locks on the table --echo # being selected from in SF as the call to such function ---echo # won't get into the binary log. 
In practice, however, we ---echo # discover that fact too late in the process to be able to ---echo # affect the decision what locks should be taken. ---echo # Hence, strong locks are taken in this case. +--echo # won't get into the binary log. let $statement= select f1(); let $restore_table= ; ---source include/check_no_concurrent_insert.inc +--source include/check_concurrent_insert.inc let $statement= set @a:= f1(); let $restore_table= ; ---source include/check_no_concurrent_insert.inc +--source include/check_concurrent_insert.inc --echo # --echo # 4.2 INSERT (or other statement which modifies data) with @@ -538,22 +561,20 @@ let $restore_table= t2; --echo # modify data and reads a table through subselect --echo # in a control construct. --echo # ---echo # Again, in theory a call to this function won't get to the ---echo # binary log and thus no strong lock is needed. But in practice ---echo # we don't detect this fact early enough (get_lock_type_for_table()) ---echo # to avoid taking a strong lock. +--echo # Call to this function won't get to the +--echo # binary log and thus no strong lock is needed. let $statement= select f3(); let $restore_table= ; ---source include/check_no_concurrent_insert.inc +--source include/check_concurrent_insert.inc let $statement= set @a:= f3(); let $restore_table= ; ---source include/check_no_concurrent_insert.inc +--source include/check_concurrent_insert.inc let $statement= select f4(); let $restore_table= ; ---source include/check_no_concurrent_insert.inc +--source include/check_concurrent_insert.inc let $statement= set @a:= f4(); let $restore_table= ; ---source include/check_no_concurrent_insert.inc +--source include/check_concurrent_insert.inc --echo # --echo # 4.5. INSERT (or other statement which modifies data) with @@ -591,22 +612,21 @@ let $restore_table= t2; --echo # doesn't modify data and reads tables through --echo # a view. 
--echo # ---echo # Once again, in theory, calls to such functions won't ---echo # get into the binary log and thus don't need strong ---echo # locks. But in practice this fact is discovered ---echo # too late to have any effect. +--echo # Calls to such functions won't get into +--echo # the binary log and thus don't need strong +--echo # locks. let $statement= select f6(); let $restore_table= t2; ---source include/check_no_concurrent_insert.inc +--source include/check_concurrent_insert.inc let $statement= set @a:= f6(); let $restore_table= t2; ---source include/check_no_concurrent_insert.inc +--source include/check_concurrent_insert.inc let $statement= select f7(); let $restore_table= t2; ---source include/check_no_concurrent_insert.inc +--source include/check_concurrent_insert.inc let $statement= set @a:= f7(); let $restore_table= t2; ---source include/check_no_concurrent_insert.inc +--source include/check_concurrent_insert.inc --echo # --echo # 4.8 INSERT which uses stored function which @@ -644,12 +664,11 @@ let $restore_table= t2; --echo # data and reads a table indirectly, by calling another --echo # function. --echo # ---echo # In theory, calls to such functions won't get into the binary ---echo # log and thus don't need to acquire strong locks. But in practice ---echo # this fact is discovered too late to have any effect. +--echo # Calls to such functions won't get into the binary +--echo # log and thus don't need to acquire strong locks. 
let $statement= select f10(); let $restore_table= ; ---source include/check_no_concurrent_insert.inc +--source include/check_concurrent_insert.inc --echo # --echo # 4.11 INSERT which uses a stored function which doesn't modify @@ -700,6 +719,36 @@ let $statement= insert into t2 values (f13((select i+10 from t1 where i=1))); let $restore_table= t2; --source include/check_no_concurrent_insert.inc +--echo # +--echo # 4.15 SELECT/SET with a stored function which +--echo # inserts data into a temporary table using +--echo # SELECT on t1. +--echo # +--echo # Since this statement is written to the binary log it should +--echo # be serialized with concurrent statements affecting the data it +--echo # uses. Therefore it should take strong locks on the data it reads. +let $statement= select f16(); +let $restore_table= ; +--source include/check_no_concurrent_insert.inc +let $statement= set @a:= f16(); +let $restore_table= ; +--source include/check_no_concurrent_insert.inc + +--echo # +--echo # 4.16 SELECT/SET with a stored function which call procedure +--echo # which inserts data into a temporary table using +--echo # SELECT on t1. +--echo # +--echo # Since this statement is written to the binary log it should +--echo # be serialized with concurrent statements affecting the data it +--echo # uses. Therefore it should take strong locks on the data it reads. +let $statement= select f17(); +let $restore_table= ; +--source include/check_no_concurrent_insert.inc +let $statement= set @a:= f17(); +let $restore_table= ; +--source include/check_no_concurrent_insert.inc + --echo # --echo # 5. Statements that read tables through stored procedures. @@ -730,12 +779,11 @@ let $restore_table= t2; --echo # 5.3 SELECT that calls a function that doesn't modify data and --echo # uses a CALL statement that reads a table via SELECT. --echo # ---echo # In theory, calls to such functions won't get into the binary ---echo # log and thus don't need to acquire strong locks. 
But in practice ---echo # this fact is discovered too late to have any effect. +--echo # Calls to such functions won't get into the binary +--echo # log and thus don't need to acquire strong locks. let $statement= select f15(); let $restore_table= ; ---source include/check_no_concurrent_insert.inc +--source include/check_concurrent_insert.inc --echo # --echo # 5.4 INSERT which calls function which doesn't modify data and @@ -800,7 +848,6 @@ let $statement= update t5 set l= 2 where l = 1; let $restore_table= t5; --source include/check_no_concurrent_insert.inc - --echo # Clean-up. drop function f1; drop function f2; @@ -817,9 +864,12 @@ drop function f12; drop function f13; drop function f14; drop function f15; +drop function f16; +drop function f17; drop view v1, v2; drop procedure p1; drop procedure p2; +drop procedure p3; drop table t1, t2, t3, t4, t5; disconnect con1; diff --git a/mysql-test/t/log_tables_upgrade.test b/mysql-test/t/log_tables_upgrade.test index feb2d8c4aa9..d08d74174db 100644 --- a/mysql-test/t/log_tables_upgrade.test +++ b/mysql-test/t/log_tables_upgrade.test @@ -1,12 +1,6 @@ ---source include/not_embedded.inc --source include/have_csv.inc --source include/have_innodb.inc - -# Only run test if "mysql_upgrade" is found ---require r/have_mysql_upgrade.result ---disable_query_log -select LENGTH("$MYSQL_UPGRADE")>0 as have_mysql_upgrade; ---enable_query_log +--source include/mysql_upgrade_preparation.inc --echo # --echo # Bug#49823: mysql_upgrade fatal error due to general_log / slow_low CSV NULL diff --git a/mysql-test/t/long_tmpdir-master.sh b/mysql-test/t/long_tmpdir-master.sh index 7bcbee26105..7bcbee26105 100644..100755 --- a/mysql-test/t/long_tmpdir-master.sh +++ b/mysql-test/t/long_tmpdir-master.sh diff --git a/mysql-test/t/lowercase_mixed_tmpdir-master.sh b/mysql-test/t/lowercase_mixed_tmpdir-master.sh index 9330d0581ee..9330d0581ee 100644..100755 --- a/mysql-test/t/lowercase_mixed_tmpdir-master.sh +++ 
b/mysql-test/t/lowercase_mixed_tmpdir-master.sh diff --git a/mysql-test/t/mysql_client_test-master.opt b/mysql-test/t/mysql_client_test-master.opt index 5b347aa0416..fcaf2b69fbc 100644 --- a/mysql-test/t/mysql_client_test-master.opt +++ b/mysql-test/t/mysql_client_test-master.opt @@ -1,3 +1,4 @@ --general-log --general-log-file=$MYSQLTEST_VARDIR/log/master.log --log-output=FILE,TABLE +--max-allowed-packet=32000000 diff --git a/mysql-test/t/mysql_client_test_comp-master.opt b/mysql-test/t/mysql_client_test_comp-master.opt new file mode 100644 index 00000000000..783093c900b --- /dev/null +++ b/mysql-test/t/mysql_client_test_comp-master.opt @@ -0,0 +1,2 @@ +--loose-enable-performance-schema +--max-allowed-packet=32000000 diff --git a/mysql-test/t/mysql_client_test_comp.test b/mysql-test/t/mysql_client_test_comp.test new file mode 100644 index 00000000000..0a6b0ba1130 --- /dev/null +++ b/mysql-test/t/mysql_client_test_comp.test @@ -0,0 +1,20 @@ +# run mysql_client_test with performance schema + +# No need to run this with embedded server +-- source include/not_embedded.inc + +# need to have the dynamic loading turned on for the client plugin tests +--source include/have_plugin_auth.inc + +SET @old_slow_query_log= @@global.slow_query_log; + +call mtr.add_suppression(" Error reading file './client_test_db/test_frm_bug.frm'"); + +--exec echo "$MYSQL_CLIENT_TEST" > $MYSQLTEST_VARDIR/log/mysql_client_test_comp.out.log 2>&1 +--exec $MYSQL_CLIENT_TEST --getopt-ll-test=25600M >> $MYSQLTEST_VARDIR/log/mysql_client_test_comp.out.log 2>&1 + +# End of test +echo ok; + +# Restore state changed by mysql_test_run +SET @@global.slow_query_log= @old_slow_query_log; diff --git a/mysql-test/t/mysql_client_test_nonblock-master.opt b/mysql-test/t/mysql_client_test_nonblock-master.opt index 034d5340a23..5775e707c5f 100644 --- a/mysql-test/t/mysql_client_test_nonblock-master.opt +++ b/mysql-test/t/mysql_client_test_nonblock-master.opt @@ -1 +1,2 @@ --general-log 
--general-log-file=$MYSQLTEST_VARDIR/log/master.log --log-output=FILE,TABLE +--max-allowed-packet=32000000 diff --git a/mysql-test/t/mysqltest.test b/mysql-test/t/mysqltest.test index ffbec36873e..ae59c713c3d 100644 --- a/mysql-test/t/mysqltest.test +++ b/mysql-test/t/mysqltest.test @@ -2053,7 +2053,7 @@ select "at" as col1, "AT" as col2, "c" as col3; --replace_regex /a/b/ /ct/d/ select "a" as col1, "ct" as col2; ---replace_regex /(strawberry)/raspberry and \1/ /blueberry/blackberry/ /potato/tomato/; +--replace_regex /(strawberry)/raspberry and \1/ /blueberry/blackberry/ /potato/tomato/ select "strawberry","blueberry","potato"; --error 1 @@ -2098,6 +2098,12 @@ select "a is a and less is more" as txt; select "a is a and less is more" as txt; --enable_query_log +# +# different delimiters +# +--replace_regex (a)[b] /c/d/ <e>{f}i {g\/\}}/h/ +select 'ABCDEF abcdef g/}' as txt; + #------------------------------------------------------------------------- # BUG #11754855 : Passing variable to --error #------------------------------------------------------------------------- diff --git a/mysql-test/t/order_by_innodb.test b/mysql-test/t/order_by_innodb.test new file mode 100644 index 00000000000..c20eaceb053 --- /dev/null +++ b/mysql-test/t/order_by_innodb.test @@ -0,0 +1,23 @@ +# +# ORDER BY handling (e.g. 
filesort) tests that require innodb +# +-- source include/have_innodb.inc + +--disable_warnings +drop table if exists t0,t1,t2,t3; +--enable_warnings + +--echo # +--echo # MDEV-6434: Wrong result (extra rows) with ORDER BY, multiple-column index, InnoDB +--echo # + +CREATE TABLE t1 (a INT, b INT, c INT, d TEXT, KEY idx(a,b,c)) ENGINE=InnoDB; + +INSERT INTO t1 (a,c) VALUES +(8, 9),(8, 10),(13, 15),(16, 17),(16, 18),(16, 19),(20, 21), +(20, 22),(20, 24),(20, 25),(20, 26),(20, 27),(20, 28); + +SELECT * FROM t1 WHERE a = 8 AND (b = 1 OR b IS NULL) ORDER BY c; + +DROP TABLE t1; + diff --git a/mysql-test/t/partition.test b/mysql-test/t/partition.test index 1e1150157c7..754677e9b37 100644 --- a/mysql-test/t/partition.test +++ b/mysql-test/t/partition.test @@ -2816,6 +2816,47 @@ select * from t1 IGNORE INDEX(dob, weeks_worked_last_year, hours_worked_per_week drop table t1; +--echo # +--echo # MDEV-6322: The PARTITION engine can return wrong query results +--echo # +CREATE TABLE t1 ( + CustomerID varchar(5) DEFAULT NULL, + CompanyName varchar(40) DEFAULT NULL, + ContactName varchar(30) DEFAULT NULL, + ContactTitle varchar(30) DEFAULT NULL, + Address varchar(60) DEFAULT NULL, + City varchar(15) DEFAULT NULL, + Region varchar(15) DEFAULT NULL, + PostalCode varchar(10) DEFAULT NULL, + Country varchar(15) NOT NULL, + Phone varchar(24) DEFAULT NULL, + Fax varchar(24) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +PARTITION BY LIST COLUMNS(Country) +(PARTITION p1 VALUES IN ('Germany','Austria','Switzerland','Poland'), + PARTITION p2 VALUES IN ('USA','Canada','Mexico'), + PARTITION p3 VALUES IN ('Spain','Portugal','Italy'), + PARTITION p4 VALUES IN ('UK','Ireland'), + PARTITION p5 VALUES IN ('France','Belgium'), + PARTITION p6 VALUES IN ('Sweden','Finland','Denmark','Norway'), + PARTITION p7 VALUES IN ('Venezuela','Argentina','Brazil') +); + +INSERT INTO t1 (CustomerID, City, Country) VALUES +('ANATR','México D.F','Mexico'), +('ANTON','México D.F','Mexico'), 
+('BOTTM','Tsawassen','Canada'), +('CENTC','México D.F','Mexico'), +('GREAL','Eugene','USA'), +('HUNGC','Elgin','USA'), +('LAUGB','Vancouver','Canada'), +('LAZYK','Walla Walla','USA'), +('LETSS','San Francisco','USA'), +('LONEP','Portland','USA'); + +SELECT * FROM t1 WHERE Country = 'USA'; +DROP TABLE t1; + # # Test ALTER TABLE ADD/DROP PARTITION IF EXISTS # diff --git a/mysql-test/t/partition_innodb.test b/mysql-test/t/partition_innodb.test index 43f409731a6..1e2aacd474a 100644 --- a/mysql-test/t/partition_innodb.test +++ b/mysql-test/t/partition_innodb.test @@ -777,6 +777,34 @@ drop table t3; drop table t1,t2; --echo # +--echo # MySQL Bug#71095: Wrong results with PARTITION BY LIST COLUMNS() +--echo # +create table t1(c1 int, c2 int, c3 int, c4 int, +primary key(c1,c2)) engine=InnoDB +partition by list columns(c2) +(partition p1 values in (1,2) engine=InnoDB, +partition p2 values in (3,4) engine=InnoDB); + +insert into t1 values (1,1,1,1),(2,3,1,1); +select * from t1 where c1=2 and c2=3; +drop table t1; + +--echo # +--echo # MySQL Bug#72803: Wrong "Impossible where" with LIST partitioning +--echo # also MDEV-6240: Wrong "Impossible where" with LIST partitioning +--echo # +CREATE TABLE t1 ( d DATE) ENGINE = InnoDB +PARTITION BY LIST COLUMNS (d) +( + PARTITION p0 VALUES IN ('1990-01-01','1991-01-01'), + PARTITION p1 VALUES IN ('1981-01-01') +); + +INSERT INTO t1 (d) VALUES ('1991-01-01'); +SELECT * FROM t1 WHERE d = '1991-01-01'; +DROP TABLE t1; + +--echo # --echo # MDEV-5963: InnoDB: Assertion failure in file row0sel.cc line 2503, --echo # Failing assertion: 0 with "key ptr now exceeds key end by 762 bytes" --echo # (independent testcase for Oracle Bug#13947868) diff --git a/mysql-test/t/partition_pruning.test b/mysql-test/t/partition_pruning.test index 4c97bab454d..06ef99e1e70 100644 --- a/mysql-test/t/partition_pruning.test +++ b/mysql-test/t/partition_pruning.test @@ -1414,6 +1414,54 @@ explain partitions select * from t1 where a between 10 and 10+33; drop 
table t0, t1; --echo # +--echo # Bug#71095: Wrong results with PARTITION BY LIST COLUMNS() +--echo # +CREATE TABLE t1 +(c1 int, + c2 int, + c3 int, + c4 int, + PRIMARY KEY (c1,c2)) +PARTITION BY LIST COLUMNS (c2) +(PARTITION p1 VALUES IN (1,2), + PARTITION p2 VALUES IN (3,4)); +INSERT INTO t1 VALUES (1, 1, 1, 1), (2, 3, 1, 1); +INSERT INTO t1 VALUES (1, 2, 1, 1), (2, 4, 1, 1); +SELECT * FROM t1 WHERE c1 = 1 AND c2 < 1; +SELECT * FROM t1 WHERE c1 = 1 AND c2 <= 1; +SELECT * FROM t1 WHERE c1 = 1 AND c2 = 1; +SELECT * FROM t1 WHERE c1 = 1 AND c2 >= 1; +SELECT * FROM t1 WHERE c1 = 1 AND c2 > 1; +SELECT * FROM t1 WHERE c1 = 1 AND c2 < 3; +SELECT * FROM t1 WHERE c1 = 1 AND c2 <= 3; +SELECT * FROM t1 WHERE c1 = 2 AND c2 <= 3; +SELECT * FROM t1 WHERE c1 = 2 AND c2 = 3; +SELECT * FROM t1 WHERE c1 = 2 AND c2 >= 3; +SELECT * FROM t1 WHERE c1 = 2 AND c2 > 3; +SELECT * FROM t1 WHERE c1 = 2 AND c2 < 4; +SELECT * FROM t1 WHERE c1 = 2 AND c2 <= 4; +SELECT * FROM t1 WHERE c1 = 2 AND c2 = 4; +SELECT * FROM t1 WHERE c1 = 2 AND c2 >= 4; +SELECT * FROM t1 WHERE c1 = 2 AND c2 > 4; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 AND c2 < 1; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 AND c2 <= 1; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 AND c2 = 1; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 AND c2 >= 1; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 AND c2 > 1; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 AND c2 < 3; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 AND c2 <= 3; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 <= 3; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 = 3; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 >= 3; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 > 3; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 < 4; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 <= 4; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 = 4; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 
AND c2 >= 4; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 > 4; +DROP TABLE t1; + +--echo # --echo # MDEV-6239: Partition pruning is not working as expected in an inner query --echo # diff --git a/mysql-test/t/plugin_loaderr.test b/mysql-test/t/plugin_loaderr.test index e319e2fb54d..7b98a94afd4 100644 --- a/mysql-test/t/plugin_loaderr.test +++ b/mysql-test/t/plugin_loaderr.test @@ -1,4 +1,6 @@ +--source include/not_embedded.inc + # We used an invalid command-line option and InnoDB failed to start. # Ignore all related warnings call mtr.add_suppression("InnoDB"); @@ -8,3 +10,17 @@ SELECT PLUGIN_NAME,PLUGIN_STATUS,PLUGIN_TYPE,PLUGIN_LIBRARY,PLUGIN_LIBRARY_VERSION,LOAD_OPTION FROM INFORMATION_SCHEMA.PLUGINS WHERE plugin_name = 'innodb'; +--echo # +--echo # MDEV-6351 --plugin=force has no effect for built-in plugins +--echo # +--exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +--shutdown_server +--source include/wait_until_disconnected.inc + +--error 1 +--exec $MYSQLD_CMD --innodb=force --innodb-page-size=6000 + +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +--enable_reconnect +--source include/wait_until_connected_again.inc +--disable_reconnect diff --git a/mysql-test/t/rpl_mysqldump_slave.test b/mysql-test/t/rpl_mysqldump_slave.test index 77fc4a050cc..345bdb82535 100644 --- a/mysql-test/t/rpl_mysqldump_slave.test +++ b/mysql-test/t/rpl_mysqldump_slave.test @@ -36,4 +36,53 @@ start slave; --exec $MYSQL_DUMP_SLAVE --compact --dump-slave no_such_db start slave; + +--echo *** Test mysqldump --dump-slave GTID functionality. + +--connection master +SET gtid_seq_no = 1000; +CREATE TABLE t1 (a INT PRIMARY KEY); +DROP TABLE t1; +--sync_slave_with_master + +--connection slave +# Inject a local transaction on the slave to check that this is not considered +# for --dump-slave. +CREATE TABLE t2 (a INT PRIMARY KEY); +DROP TABLE t2; + +--echo +--echo 1. 
--dump-slave=1 +--echo +--replace_regex /MASTER_LOG_POS=[0-9]+/MASTER_LOG_POS=BINLOG_START/ +--exec $MYSQL_DUMP_SLAVE --compact --dump-slave=1 --gtid test + +--echo +--echo 2. --dump-slave=2 +--echo +--replace_regex /MASTER_LOG_POS=[0-9]+/MASTER_LOG_POS=BINLOG_START/ +--exec $MYSQL_DUMP_SLAVE --compact --dump-slave=2 --gtid test + + +--echo *** Test mysqldump --master-data GTID functionality. +--echo +--echo 1. --master-data=1 +--echo +--replace_regex /MASTER_LOG_POS=[0-9]+/MASTER_LOG_POS=BINLOG_START/ +--exec $MYSQL_DUMP_SLAVE --compact --master-data=1 --gtid test + +--echo +--echo 2. --master-data=2 +--echo +--replace_regex /MASTER_LOG_POS=[0-9]+/MASTER_LOG_POS=BINLOG_START/ +--exec $MYSQL_DUMP_SLAVE --compact --master-data=2 --gtid test + +--echo +--echo 3. --master-data --single-transaction +--echo +--replace_regex /MASTER_LOG_POS=[0-9]+/MASTER_LOG_POS=BINLOG_START/ +--exec $MYSQL_DUMP_SLAVE --compact --master-data --single-transaction --gtid test + + + --source include/rpl_end.inc diff --git a/mysql-test/t/stat_tables-enospc.test b/mysql-test/t/stat_tables-enospc.test new file mode 100644 index 00000000000..12e42f6adc0 --- /dev/null +++ b/mysql-test/t/stat_tables-enospc.test @@ -0,0 +1,23 @@ +# +# MDEV-6181 EITS could eat all tmpdir space and hang +# +# test that ANALYZE TABLE is immediately aborted when going out of disk space +--source include/have_debug.inc +call mtr.add_suppression("No space left on device"); +create table t1 (a varchar(255), b varchar(255), c varchar(255)); +--disable_query_log +let $i=10000; +while ($i) { + insert t1 values (repeat(format(rand(),10), 20), + repeat(format(rand(),10), 20), + repeat(format(rand(),10), 20)); + dec $i; +} +--enable_query_log +set use_stat_tables=PREFERABLY, optimizer_use_condition_selectivity=3; +set debug_dbug='+d,simulate_file_write_error'; +--replace_regex /'.*'/'tmp-file'/ +analyze table t1; +set debug_dbug=''; +drop table t1; + diff --git a/mysql-test/t/subselect_sj_mat.test 
b/mysql-test/t/subselect_sj_mat.test index 91b69a6a09c..912e9d5befd 100644 --- a/mysql-test/t/subselect_sj_mat.test +++ b/mysql-test/t/subselect_sj_mat.test @@ -1808,5 +1808,38 @@ EXECUTE stmt; DROP TABLE t1, t2; DROP VIEW v2; +--echo # +--echo # MDEV-6289 : Unexpected results when querying information_schema +--echo # +CREATE TABLE t1 ( + id int(11) unsigned NOT NULL AUTO_INCREMENT, + db varchar(254) NOT NULL DEFAULT '', + PRIMARY KEY (id), + UNIQUE KEY db (db) +) DEFAULT CHARSET=utf8; +INSERT INTO t1 (db) VALUES ('mysqltest1'),('mysqltest2'),('mysqltest3'),('mysqltest4'); + +--disable_warnings +drop database if exists mysqltest1; +drop database if exists mysqltest2; +drop database if exists mysqltest3; +drop database if exists mysqltest4; +--enable_warnings +create database mysqltest1; +create database mysqltest2; +create database mysqltest3; +create database mysqltest4; + +SELECT db FROM t1 WHERE db IN (SELECT SCHEMA_NAME FROM information_schema.schemata) ORDER BY db DESC; + +EXPLAIN EXTENDED +SELECT db FROM t1 WHERE db IN (SELECT SCHEMA_NAME FROM information_schema.schemata) ORDER BY db DESC; + +drop table t1; +drop database mysqltest1; +drop database mysqltest2; +drop database mysqltest3; +drop database mysqltest4; + --echo # End of 5.5 tests diff --git a/mysql-test/t/table_options-5867.test b/mysql-test/t/table_options-5867.test new file mode 100644 index 00000000000..153ec08e675 --- /dev/null +++ b/mysql-test/t/table_options-5867.test @@ -0,0 +1,30 @@ +# +# MDEV-5867 ALTER TABLE t1 ENGINE=InnoDB keeps bad options when t1 ENGINE is CONNECT +# +# verify that SHOW CREATE TABLE hides unknown options when IGNORE_BAD_TABLE_OPTIONS is not set + +--source include/have_example_plugin.inc +--source include/not_embedded.inc + +install soname 'ha_example'; + +set sql_mode='ignore_bad_table_options'; +create table t1 ( + a int complex='c,f,f,f' invalid=3 +) engine=example ull=10000 str='dskj' one_or_two='one' yesno=0 + foobar=barfoo; + +create table t2 (a int, key (a) 
some_option=2014); + +show create table t1; +show create table t2; + +set sql_mode=''; + +show create table t1; +show create table t2; + +drop table t1, t2; + +uninstall soname 'ha_example'; + diff --git a/mysql-test/t/type_bit.test b/mysql-test/t/type_bit.test index 8bedf9357ef..2ca608e76ff 100644 --- a/mysql-test/t/type_bit.test +++ b/mysql-test/t/type_bit.test @@ -362,11 +362,11 @@ f2 bit(14) NOT NULL default b'11110000111100' SHOW CREATE TABLE t1; DROP TABLE t1; ---error ER_INVALID_DEFAULT CREATE TABLE IF NOT EXISTS t1 ( f1 bit(2) NOT NULL default b'' ) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_general_ci; - +SHOW CREATE TABLE t1; +DROP TABLE t1; # # Bug#31399 Wrong query result when doing join buffering over BIT fields diff --git a/mysql-test/t/type_bit_innodb.test b/mysql-test/t/type_bit_innodb.test index 7ba90bf08fa..27eaeda0f99 100644 --- a/mysql-test/t/type_bit_innodb.test +++ b/mysql-test/t/type_bit_innodb.test @@ -148,3 +148,12 @@ select * from t1; drop table t1; --echo End of 5.0 tests + +# +# MDEV-6052 Inconsistent results with bit type +# +create table t1(f1 bit(2) not null default b'10',f2 bit(14) not null default b'11110000111100'); +insert into t1 (f1) values (default); +insert into t1 values (b'',b''),('',''); +select hex(f1), hex(f2) from t1; +drop table t1; diff --git a/mysql-test/t/union.test b/mysql-test/t/union.test index 877509a9fc0..a5d7dae606f 100644 --- a/mysql-test/t/union.test +++ b/mysql-test/t/union.test @@ -1273,6 +1273,36 @@ SELECT(SELECT 1 AS a ORDER BY a) AS dev; SELECT(SELECT 1 AS a LIMIT 1) AS dev; SELECT(SELECT 1 AS a FROM dual ORDER BY a DESC LIMIT 1) AS dev; + +--echo # +--echo # Bug #17059925 : UNIONS COMPUTES ROWS_EXAMINED INCORRECTLY +--echo # + +## Save current state of slow log variables +SET @old_slow_query_log= @@global.slow_query_log; +SET @old_log_output= @@global.log_output; +SET @old_long_query_time= @@long_query_time; +SET GLOBAL log_output= "TABLE"; +SET GLOBAL slow_query_log= ON; +SET SESSION 
long_query_time= 0; + +CREATE TABLE t17059925 (a INT); +CREATE TABLE t2 (b INT); +CREATE TABLE t3 (c INT); +INSERT INTO t17059925 VALUES (1), (2), (3); +INSERT INTO t2 VALUES (4), (5), (6); +INSERT INTO t3 VALUES (7), (8), (9); +TRUNCATE table mysql.slow_log; +--sorted_result +SELECT * FROM t17059925 UNION SELECT * FROM t2 UNION SELECT * FROM t3; +SELECT sql_text, rows_examined FROM mysql.slow_log WHERE sql_text LIKE '%SELECT%t17059925%'; +DROP TABLE t17059925, t2, t3; + +## Reset to initial values +SET @@long_query_time= @old_long_query_time; +SET @@global.log_output= @old_log_output; +SET @@global.slow_query_log= @old_slow_query_log; + --echo # --echo # lp:1010729: Unexpected syntax error from UNION --echo # (bug #54382) with single-table join nest diff --git a/mysql-test/t/view.test b/mysql-test/t/view.test index d1d4b936aba..6029ad471f6 100644 --- a/mysql-test/t/view.test +++ b/mysql-test/t/view.test @@ -4700,6 +4700,47 @@ DROP DATABASE IF EXISTS nodb; --error ER_BAD_DB_ERROR CREATE VIEW nodb.a AS SELECT 1; + +--echo # +--echo # BUG#14117018 - MYSQL SERVER CREATES INVALID VIEW DEFINITION +--echo # BUG#18405221 - SHOW CREATE VIEW OUTPUT INCORRECT +--echo # + +CREATE VIEW v1 AS (SELECT '' FROM DUAL); +CREATE VIEW v2 AS (SELECT 'BUG#14117018' AS col1 FROM DUAL) UNION ALL + (SELECT '' FROM DUAL); +CREATE VIEW v3 AS (SELECT 'BUG#14117018' AS col1 FROM DUAL) UNION ALL + (SELECT '' FROM DUAL) UNION ALL + (SELECT '' FROM DUAL); +CREATE VIEW v4 AS (SELECT 'BUG#14117018' AS col1 FROM DUAL) UNION ALL + (SELECT '' AS col2 FROM DUAL) UNION ALL + (SELECT '' FROM DUAL); + +# In the second (and later) UNIONed queries, duplicate column names are allowed +CREATE VIEW v5 AS (SELECT 'buggy' AS col1, 'fix' as col2 FROM DUAL) UNION ALL + (SELECT 'buggy' as a, 'fix' as a FROM DUAL); + +--echo # Name for the column in select1 is set properly with or +--echo # without this fix. +SHOW CREATE VIEW v1; + +--echo # Name for the column in select2 is set with this fix. 
+--echo # Without this fix, name would not have set for the +--echo # columns in select2. +SHOW CREATE VIEW v2; + +--echo # Name for the field item in select2 & select3 is set with this fix. +--echo # Without this fix, name would not have set for the +--echo # columns in select2 & select3. +SHOW CREATE VIEW v3; + +--echo # Name for the field item in select3 is set with this fix. +--echo # Without this fix, name would not have set for the +--echo # columns in select3. +SHOW CREATE VIEW v4; + +DROP VIEW v1, v2, v3, v4, v5; + # Check that all connections opened by test cases in this file are really # gone so execution of other tests won't be affected by their presence. --source include/wait_until_count_sessions.inc @@ -5231,6 +5272,69 @@ drop view v1; drop table t1,t2,t3; SET optimizer_switch=@save_optimizer_switch_MDEV_3874; +# +# MDEV-5515: sub-bug test of 3rd execution crash +# + +CREATE TABLE `t1` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT, + `f0` int(11) unsigned NOT NULL DEFAULT '0', + `f1` int(11) unsigned NOT NULL DEFAULT '0', + PRIMARY KEY (`id`), + UNIQUE KEY `id` (`id`) +); + +CREATE TABLE `t2` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT, + `f02` bigint(20) unsigned NOT NULL DEFAULT '0', + `f03` int(11) unsigned NOT NULL DEFAULT '0', + PRIMARY KEY (`id`), + UNIQUE KEY `id` (`id`) +); + +CREATE ALGORITHM=UNDEFINED SQL SECURITY DEFINER VIEW `v1` AS + SELECT + `t1`.`f0` AS `f0`, + `t1`.`f1` AS `f1`, + `t2`.`f02` AS `f02`, + `t2`.`f03` AS `f03` + FROM + (`t1` LEFT JOIN `t2` ON((`t1`.`id` = `t2`.`f02`))); + +--delimiter | +CREATE FUNCTION `f1`( + p0 BIGINT(20) UNSIGNED + ) + RETURNS bigint(20) unsigned + DETERMINISTIC + CONTAINS SQL + SQL SECURITY DEFINER + COMMENT '' +BEGIN + +DECLARE k0 INTEGER UNSIGNED DEFAULT 0; +DECLARE lResult INTEGER UNSIGNED DEFAULT 0; + + SET k0 = 0; + WHILE k0 < 1 DO + SELECT COUNT(*) as `f00` INTO lResult FROM `v1` WHERE `v1`.`f0` = p0; -- BUG + SET k0 = k0 + 1; + END WHILE; + + RETURN(k0); +END| +--delimiter ; + + 
+SELECT `f1`(1); +SELECT `f1`(1); +SELECT `f1`(1); +SELECT `f1`(1); + +DROP FUNCTION f1; +DROP VIEW v1; +DROP TABLE t1, t2; + --echo # ----------------------------------------------------------------- --echo # -- End of 5.5 tests. --echo # ----------------------------------------------------------------- diff --git a/mysql-test/valgrind.supp b/mysql-test/valgrind.supp index 45499e5891f..f1bc19e27eb 100644 --- a/mysql-test/valgrind.supp +++ b/mysql-test/valgrind.supp @@ -1138,6 +1138,17 @@ { + OpenSSL still reachable. + Memcheck:Leak + fun:*alloc + fun:CRYPTO_malloc + obj:*libssl* + fun:SSL_COMP_get_compression_methods + fun:SSL_library_init +} + + +{ Problem with udf and libresolve Memcheck:Cond obj:*/libresolv*.so diff --git a/mysys/CMakeLists.txt b/mysys/CMakeLists.txt index f0d25dae6b9..4756bbccf2f 100644 --- a/mysys/CMakeLists.txt +++ b/mysys/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2006, 2013, Oracle and/or its affiliates +# Copyright (c) 2006, 2014, Oracle and/or its affiliates # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -89,7 +89,6 @@ ADD_EXECUTABLE(thr_lock thr_lock.c) TARGET_LINK_LIBRARIES(thr_lock mysys) SET_TARGET_PROPERTIES(thr_lock PROPERTIES COMPILE_FLAGS "-DMAIN") -INSTALL_DEBUG_SYMBOLS(mysys) IF(MSVC) INSTALL_DEBUG_TARGET(mysys DESTINATION ${INSTALL_LIBDIR}/debug) ENDIF() diff --git a/mysys/ma_dyncol.c b/mysys/ma_dyncol.c index 3b5e05f1b01..c0508b97922 100644 --- a/mysys/ma_dyncol.c +++ b/mysys/ma_dyncol.c @@ -1610,7 +1610,7 @@ dynamic_new_column_store(DYNAMIC_COLUMN *str, my_bool new_str) { struct st_service_funcs *fmt= fmt_data + hdr->format; - void **columns_order; + void **UNINIT_VAR(columns_order); uchar *element; uint i; enum enum_dyncol_func_result rc= ER_DYNCOL_RESOURCE; diff --git a/mysys/mf_iocache2.c b/mysys/mf_iocache2.c index 22def2e0923..06dfc9f2079 100644 --- a/mysys/mf_iocache2.c +++ b/mysys/mf_iocache2.c @@ -1,4 +1,4 @@ -/* 
Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -456,6 +456,13 @@ process_flags: goto err; } } + else if (*fmt == 'c') /* char type parameter */ + { + char par[2]; + par[0] = va_arg(args, int); + if (my_b_write(info, (uchar*) par, 1)) + goto err; + } else if (*fmt == 'b') /* Sized buffer parameter, only precision makes sense */ { char *par = va_arg(args, char *); diff --git a/mysys/mf_keycache.c b/mysys/mf_keycache.c index d4c4f8c9997..5505693ce2c 100644 --- a/mysys/mf_keycache.c +++ b/mysys/mf_keycache.c @@ -149,7 +149,8 @@ typedef struct st_keycache_wqueue struct st_my_thread_var *last_thread; /* circular list of waiting threads */ } KEYCACHE_WQUEUE; -#define CHANGED_BLOCKS_HASH 128 /* must be power of 2 */ +/* Default size of hash for changed files */ +#define MIN_CHANGED_BLOCKS_HASH_SIZE 128 /* Control block for a simple (non-partitioned) key cache */ @@ -165,6 +166,7 @@ typedef struct st_simple_key_cache_cb ulong age_threshold; /* age threshold for hot blocks */ ulonglong keycache_time; /* total number of block link operations */ uint hash_entries; /* max number of entries in the hash table */ + uint changed_blocks_hash_size; /* Number of hash buckets for file blocks */ int hash_links; /* max number of hash links */ int hash_links_used; /* number of hash links currently used */ int disk_blocks; /* max number of blocks in the cache */ @@ -191,8 +193,8 @@ typedef struct st_simple_key_cache_cb KEYCACHE_WQUEUE waiting_for_resize_cnt; KEYCACHE_WQUEUE waiting_for_hash_link; /* waiting for a free hash link */ KEYCACHE_WQUEUE waiting_for_block; /* requests waiting for a free block */ - BLOCK_LINK *changed_blocks[CHANGED_BLOCKS_HASH]; /* hash for dirty file bl.*/ - BLOCK_LINK *file_blocks[CHANGED_BLOCKS_HASH]; /* hash for 
other file bl.*/ + BLOCK_LINK **changed_blocks; /* hash for dirty file bl.*/ + BLOCK_LINK **file_blocks; /* hash for other file bl.*/ /* Statistics variables. These are reset in reset_key_cache_counters(). */ ulong global_blocks_changed; /* number of currently dirty blocks */ @@ -331,7 +333,7 @@ static void test_key_cache(SIMPLE_KEY_CACHE_CB *keycache, #define KEYCACHE_HASH(f, pos) \ ((KEYCACHE_BASE_EXPR(f, pos) / keycache->hash_factor) & \ (keycache->hash_entries-1)) -#define FILE_HASH(f) ((uint) (f) & (CHANGED_BLOCKS_HASH-1)) +#define FILE_HASH(f, cache) ((uint) (f) & (cache->changed_blocks_hash_size-1)) #define DEFAULT_KEYCACHE_DEBUG_LOG "keycache_debug.log" @@ -468,9 +470,10 @@ static inline uint next_power(uint value) */ static -int init_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, uint key_cache_block_size, +int init_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, + uint key_cache_block_size, size_t use_mem, uint division_limit, - uint age_threshold) + uint age_threshold, uint changed_blocks_hash_size) { ulong blocks, hash_links; size_t length; @@ -515,6 +518,11 @@ int init_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, uint key_cache_block_si blocks= (ulong) (use_mem / (sizeof(BLOCK_LINK) + 2 * sizeof(HASH_LINK) + sizeof(HASH_LINK*) * 5/4 + key_cache_block_size)); + + /* Changed blocks hash needs to be a power of 2 */ + changed_blocks_hash_size= my_round_up_to_next_power(MY_MAX(changed_blocks_hash_size, + MIN_CHANGED_BLOCKS_HASH_SIZE)); + /* It doesn't make sense to have too few blocks (less than 8) */ if (blocks >= 8) { @@ -531,8 +539,9 @@ int init_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, uint key_cache_block_si while ((length= (ALIGN_SIZE(blocks * sizeof(BLOCK_LINK)) + ALIGN_SIZE(hash_links * sizeof(HASH_LINK)) + ALIGN_SIZE(sizeof(HASH_LINK*) * - keycache->hash_entries))) + - ((size_t) blocks * keycache->key_cache_block_size) > use_mem) + keycache->hash_entries) + + sizeof(BLOCK_LINK*)* (changed_blocks_hash_size*2))) + + ((size_t) blocks * 
keycache->key_cache_block_size) > use_mem && blocks > 8) blocks--; /* Allocate memory for cache page buffers */ if ((keycache->block_mem= @@ -543,8 +552,17 @@ int init_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, uint key_cache_block_si Allocate memory for blocks, hash_links and hash entries; For each block 2 hash links are allocated */ - if ((keycache->block_root= (BLOCK_LINK*) my_malloc(length, - MYF(0)))) + if (my_multi_malloc(MYF(MY_ZEROFILL), + &keycache->block_root, blocks * sizeof(BLOCK_LINK), + &keycache->hash_root, + sizeof(HASH_LINK*) * keycache->hash_entries, + &keycache->hash_link_root, + hash_links * sizeof(HASH_LINK), + &keycache->changed_blocks, + sizeof(BLOCK_LINK*) * changed_blocks_hash_size, + &keycache->file_blocks, + sizeof(BLOCK_LINK*) * changed_blocks_hash_size, + NullS)) break; my_large_free(keycache->block_mem); keycache->block_mem= 0; @@ -561,17 +579,6 @@ int init_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, uint key_cache_block_si keycache->blocks_unused= blocks; keycache->disk_blocks= (int) blocks; keycache->hash_links= hash_links; - keycache->hash_root= (HASH_LINK**) ((char*) keycache->block_root + - ALIGN_SIZE(blocks*sizeof(BLOCK_LINK))); - keycache->hash_link_root= (HASH_LINK*) ((char*) keycache->hash_root + - ALIGN_SIZE((sizeof(HASH_LINK*) * - keycache->hash_entries))); - bzero((uchar*) keycache->block_root, - keycache->disk_blocks * sizeof(BLOCK_LINK)); - bzero((uchar*) keycache->hash_root, - keycache->hash_entries * sizeof(HASH_LINK*)); - bzero((uchar*) keycache->hash_link_root, - keycache->hash_links * sizeof(HASH_LINK)); keycache->hash_links_used= 0; keycache->free_hash_list= NULL; keycache->blocks_used= keycache->blocks_changed= 0; @@ -591,7 +598,7 @@ int init_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, uint key_cache_block_si keycache->age_threshold= (age_threshold ? 
blocks * age_threshold / 100 : blocks); - + keycache->changed_blocks_hash_size= changed_blocks_hash_size; keycache->can_be_used= 1; keycache->waiting_for_hash_link.last_thread= NULL; @@ -602,10 +609,6 @@ int init_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, uint key_cache_block_si keycache->disk_blocks, (long) keycache->block_root, keycache->hash_entries, (long) keycache->hash_root, keycache->hash_links, (long) keycache->hash_link_root)); - bzero((uchar*) keycache->changed_blocks, - sizeof(keycache->changed_blocks[0]) * CHANGED_BLOCKS_HASH); - bzero((uchar*) keycache->file_blocks, - sizeof(keycache->file_blocks[0]) * CHANGED_BLOCKS_HASH); } else { @@ -832,9 +835,10 @@ void finish_resize_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, */ static -int resize_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, uint key_cache_block_size, +int resize_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, + uint key_cache_block_size, size_t use_mem, uint division_limit, - uint age_threshold) + uint age_threshold, uint changed_blocks_hash_size) { int blocks= 0; DBUG_ENTER("resize_simple_key_cache"); @@ -852,7 +856,8 @@ int resize_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, uint key_cache_block_ /* The following will work even if use_mem is 0 */ blocks= init_simple_key_cache(keycache, key_cache_block_size, use_mem, - division_limit, age_threshold); + division_limit, age_threshold, + changed_blocks_hash_size); finish: finish_resize_simple_key_cache(keycache, 0); @@ -1248,7 +1253,7 @@ static void link_to_file_list(SIMPLE_KEY_CACHE_CB *keycache, DBUG_ASSERT(block->hash_link->file == file); if (unlink_block) unlink_changed(block); - link_changed(block, &keycache->file_blocks[FILE_HASH(file)]); + link_changed(block, &keycache->file_blocks[FILE_HASH(file, keycache)]); if (block->status & BLOCK_CHANGED) { block->status&= ~BLOCK_CHANGED; @@ -1289,7 +1294,7 @@ static void link_to_changed_list(SIMPLE_KEY_CACHE_CB *keycache, unlink_changed(block); link_changed(block, - 
&keycache->changed_blocks[FILE_HASH(block->hash_link->file)]); + &keycache->changed_blocks[FILE_HASH(block->hash_link->file, keycache)]); block->status|=BLOCK_CHANGED; keycache->blocks_changed++; keycache->global_blocks_changed++; @@ -3901,7 +3906,7 @@ static int flush_key_blocks_int(SIMPLE_KEY_CACHE_CB *keycache, to flush all dirty pages with minimum seek moves */ count= 0; - for (block= keycache->changed_blocks[FILE_HASH(file)] ; + for (block= keycache->changed_blocks[FILE_HASH(file, keycache)] ; block ; block= block->next_changed) { @@ -3934,7 +3939,7 @@ restart: last_in_flush= NULL; last_for_update= NULL; end= (pos= cache)+count; - for (block= keycache->changed_blocks[FILE_HASH(file)] ; + for (block= keycache->changed_blocks[FILE_HASH(file, keycache)] ; block ; block= next) { @@ -4156,7 +4161,7 @@ restart: do { found= 0; - for (block= keycache->file_blocks[FILE_HASH(file)] ; + for (block= keycache->file_blocks[FILE_HASH(file, keycache)] ; block ; block= next) { @@ -4397,6 +4402,7 @@ static int flush_all_key_blocks(SIMPLE_KEY_CACHE_CB *keycache) uint total_found; uint found; uint idx; + uint changed_blocks_hash_size= keycache->changed_blocks_hash_size; DBUG_ENTER("flush_all_key_blocks"); do @@ -4412,7 +4418,7 @@ static int flush_all_key_blocks(SIMPLE_KEY_CACHE_CB *keycache) { found= 0; /* Step over the whole changed_blocks hash array. */ - for (idx= 0; idx < CHANGED_BLOCKS_HASH; idx++) + for (idx= 0; idx < changed_blocks_hash_size; idx++) { /* If an array element is non-empty, use the first block from its @@ -4423,7 +4429,7 @@ static int flush_all_key_blocks(SIMPLE_KEY_CACHE_CB *keycache) same hash bucket, one of them will be flushed per iteration of the outer loop of phase 1. 
*/ - if ((block= keycache->changed_blocks[idx])) + while ((block= keycache->changed_blocks[idx])) { found++; /* @@ -4435,7 +4441,6 @@ static int flush_all_key_blocks(SIMPLE_KEY_CACHE_CB *keycache) DBUG_RETURN(1); } } - } while (found); /* @@ -4450,7 +4455,7 @@ static int flush_all_key_blocks(SIMPLE_KEY_CACHE_CB *keycache) { found= 0; /* Step over the whole file_blocks hash array. */ - for (idx= 0; idx < CHANGED_BLOCKS_HASH; idx++) + for (idx= 0; idx < changed_blocks_hash_size; idx++) { /* If an array element is non-empty, use the first block from its @@ -4460,7 +4465,7 @@ static int flush_all_key_blocks(SIMPLE_KEY_CACHE_CB *keycache) same hash bucket, one of them will be flushed per iteration of the outer loop of phase 2. */ - if ((block= keycache->file_blocks[idx])) + while ((block= keycache->file_blocks[idx])) { total_found++; found++; @@ -4469,7 +4474,6 @@ static int flush_all_key_blocks(SIMPLE_KEY_CACHE_CB *keycache) DBUG_RETURN(1); } } - } while (found); /* @@ -4482,7 +4486,7 @@ static int flush_all_key_blocks(SIMPLE_KEY_CACHE_CB *keycache) #ifndef DBUG_OFF /* Now there should not exist any block any more. */ - for (idx= 0; idx < CHANGED_BLOCKS_HASH; idx++) + for (idx= 0; idx < changed_blocks_hash_size; idx++) { DBUG_ASSERT(!keycache->changed_blocks[idx]); DBUG_ASSERT(!keycache->file_blocks[idx]); @@ -5028,15 +5032,18 @@ static SIMPLE_KEY_CACHE_CB age_threshold age threshold (may be zero) DESCRIPTION - This function is the implementation of the init_key_cache interface function - that is employed by partitioned key caches. - The function builds and initializes an array of simple key caches, and then - initializes the control block structure of the type PARTITIONED_KEY_CACHE_CB - that is used for a partitioned key cache. The parameter keycache is - supposed to point to this structure. The number of partitions in the - partitioned key cache to be built must be passed through the field - 'partitions' of this structure. 
The parameter key_cache_block_size specifies - the size of the blocks in the the simple key caches to be built. + This function is the implementation of the init_key_cache + interface function that is employed by partitioned key caches. + + The function builds and initializes an array of simple key caches, + and then initializes the control block structure of the type + PARTITIONED_KEY_CACHE_CB that is used for a partitioned key + cache. The parameter keycache is supposed to point to this + structure. The number of partitions in the partitioned key cache + to be built must be passed through the field 'partitions' of this + structure. + The parameter key_cache_block_size specifies the size of the + blocks in the the simple key caches to be built. The parameters division_limit and age_threshold determine the initial values of those characteristics of the simple key caches that are used for midpoint insertion strategy. The parameter use_mem specifies the total @@ -5059,7 +5066,7 @@ static int init_partitioned_key_cache(PARTITIONED_KEY_CACHE_CB *keycache, uint key_cache_block_size, size_t use_mem, uint division_limit, - uint age_threshold) + uint age_threshold, uint changed_blocks_hash_size) { int i; size_t mem_per_cache; @@ -5103,7 +5110,8 @@ int init_partitioned_key_cache(PARTITIONED_KEY_CACHE_CB *keycache, } cnt= init_simple_key_cache(partition, key_cache_block_size, mem_per_cache, - division_limit, age_threshold); + division_limit, age_threshold, + changed_blocks_hash_size); if (cnt <= 0) { end_simple_key_cache(partition, 1); @@ -5222,7 +5230,8 @@ static int resize_partitioned_key_cache(PARTITIONED_KEY_CACHE_CB *keycache, uint key_cache_block_size, size_t use_mem, uint division_limit, - uint age_threshold) + uint age_threshold, + uint changed_blocks_hash_size) { uint i; uint partitions= keycache->partitions; @@ -5241,7 +5250,8 @@ int resize_partitioned_key_cache(PARTITIONED_KEY_CACHE_CB *keycache, } if (!err) blocks= init_partitioned_key_cache(keycache, 
key_cache_block_size, - use_mem, division_limit, age_threshold); + use_mem, division_limit, age_threshold, + changed_blocks_hash_size); if (blocks > 0) { for (i= 0; i < partitions; i++) @@ -5816,6 +5826,7 @@ static int repartition_key_cache_internal(KEY_CACHE *keycache, uint key_cache_block_size, size_t use_mem, uint division_limit, uint age_threshold, + uint changed_blocks_hash_size, uint partitions, my_bool use_op_lock); /* @@ -5828,8 +5839,11 @@ int repartition_key_cache_internal(KEY_CACHE *keycache, use_mem total memory to use for cache buffers/structures division_limit division limit (may be zero) age_threshold age threshold (may be zero) - partitions number of partitions in the key cache - use_op_lock if TRUE use keycache->op_lock, otherwise - ignore it + changed_blocks_hash_size Number of hash buckets to hold a link of different + files. Should be proportional to number of different + files sused. + partitions Number of partitions in the key cache + use_op_lock if TRUE use keycache->op_lock, otherwise - ignore it DESCRIPTION The function performs the actions required from init_key_cache(). @@ -5850,7 +5864,8 @@ int repartition_key_cache_internal(KEY_CACHE *keycache, static int init_key_cache_internal(KEY_CACHE *keycache, uint key_cache_block_size, size_t use_mem, uint division_limit, - uint age_threshold, uint partitions, + uint age_threshold, uint changed_blocks_hash_size, + uint partitions, my_bool use_op_lock) { void *keycache_cb; @@ -5901,7 +5916,7 @@ int init_key_cache_internal(KEY_CACHE *keycache, uint key_cache_block_size, keycache->can_be_used= 0; blocks= keycache->interface_funcs->init(keycache_cb, key_cache_block_size, use_mem, division_limit, - age_threshold); + age_threshold, changed_blocks_hash_size); keycache->partitions= partitions ? 
((PARTITIONED_KEY_CACHE_CB *) keycache_cb)->partitions : 0; @@ -5956,10 +5971,12 @@ int init_key_cache_internal(KEY_CACHE *keycache, uint key_cache_block_size, int init_key_cache(KEY_CACHE *keycache, uint key_cache_block_size, size_t use_mem, uint division_limit, - uint age_threshold, uint partitions) + uint age_threshold, uint changed_blocks_hash_size, + uint partitions) { return init_key_cache_internal(keycache, key_cache_block_size, use_mem, - division_limit, age_threshold, partitions, 1); + division_limit, age_threshold, + changed_blocks_hash_size, partitions, 1); } @@ -5998,7 +6015,8 @@ int init_key_cache(KEY_CACHE *keycache, uint key_cache_block_size, */ int resize_key_cache(KEY_CACHE *keycache, uint key_cache_block_size, - size_t use_mem, uint division_limit, uint age_threshold) + size_t use_mem, uint division_limit, uint age_threshold, + uint changed_blocks_hash_size) { int blocks= -1; if (keycache->key_cache_inited) @@ -6008,6 +6026,7 @@ int resize_key_cache(KEY_CACHE *keycache, uint key_cache_block_size, blocks= repartition_key_cache_internal(keycache, key_cache_block_size, use_mem, division_limit, age_threshold, + changed_blocks_hash_size, (uint) keycache->param_partitions, 0); else @@ -6015,7 +6034,8 @@ int resize_key_cache(KEY_CACHE *keycache, uint key_cache_block_size, blocks= keycache->interface_funcs->resize(keycache->keycache_cb, key_cache_block_size, use_mem, division_limit, - age_threshold); + age_threshold, + changed_blocks_hash_size); if (keycache->partitions) keycache->partitions= @@ -6453,6 +6473,7 @@ static int repartition_key_cache_internal(KEY_CACHE *keycache, uint key_cache_block_size, size_t use_mem, uint division_limit, uint age_threshold, + uint changed_blocks_hash_size, uint partitions, my_bool use_op_lock) { uint blocks= -1; @@ -6462,10 +6483,12 @@ int repartition_key_cache_internal(KEY_CACHE *keycache, pthread_mutex_lock(&keycache->op_lock); keycache->interface_funcs->resize(keycache->keycache_cb, key_cache_block_size, 0, - 
division_limit, age_threshold); + division_limit, age_threshold, + changed_blocks_hash_size); end_key_cache_internal(keycache, 1, 0); blocks= init_key_cache_internal(keycache, key_cache_block_size, use_mem, - division_limit, age_threshold, partitions, + division_limit, age_threshold, + changed_blocks_hash_size, partitions, 0); if (use_op_lock) pthread_mutex_unlock(&keycache->op_lock); @@ -6510,10 +6533,12 @@ int repartition_key_cache_internal(KEY_CACHE *keycache, int repartition_key_cache(KEY_CACHE *keycache, uint key_cache_block_size, size_t use_mem, uint division_limit, - uint age_threshold, uint partitions) + uint age_threshold, uint changed_blocks_hash_size, + uint partitions) { return repartition_key_cache_internal(keycache, key_cache_block_size, use_mem, division_limit, age_threshold, + changed_blocks_hash_size, partitions, 1); } diff --git a/mysys/my_default.c b/mysys/my_default.c index 4721382acc2..f383a3ce1e6 100644 --- a/mysys/my_default.c +++ b/mysys/my_default.c @@ -144,9 +144,8 @@ static int search_default_file_with_ext(Process_option_func func, - Windows: GetWindowsDirectory() - Windows: C:/ - Windows: Directory above where the executable is located - - Unix: /etc/ - - Unix: /etc/mysql/ - - Unix: --sysconfdir=<path> (compile-time option) + - Unix: /etc/ or the value of DEFAULT_SYSCONFDIR, if defined + - Unix: /etc/mysql/ unless DEFAULT_SYSCONFDIR is defined - ALL: getenv("MYSQL_HOME") - ALL: --defaults-extra-file=<path> (run-time option) - Unix: ~/ @@ -1236,12 +1235,12 @@ static const char **init_default_directories(MEM_ROOT *alloc) #else - errors += add_directory(alloc, "/etc/", dirs); - errors += add_directory(alloc, "/etc/mysql/", dirs); - #if defined(DEFAULT_SYSCONFDIR) if (DEFAULT_SYSCONFDIR[0]) errors += add_directory(alloc, DEFAULT_SYSCONFDIR, dirs); +#else + errors += add_directory(alloc, "/etc/", dirs); + errors += add_directory(alloc, "/etc/mysql/", dirs); #endif /* DEFAULT_SYSCONFDIR */ #endif diff --git a/mysys/my_thr_init.c 
b/mysys/my_thr_init.c index 5007cb01689..d9dbacc8524 100644 --- a/mysys/my_thr_init.c +++ b/mysys/my_thr_init.c @@ -38,22 +38,6 @@ mysql_mutex_t LOCK_localtime_r; #ifdef _MSC_VER static void install_sigabrt_handler(); #endif -#ifdef TARGET_OS_LINUX - -/* - Dummy thread spawned in my_thread_global_init() below to avoid - race conditions in NPTL pthread_exit code. -*/ - -static pthread_handler_t -nptl_pthread_exit_hack_handler(void *arg __attribute((unused))) -{ - /* Do nothing! */ - pthread_exit(0); - return 0; -} - -#endif /* TARGET_OS_LINUX */ static uint get_thread_lib(void); @@ -197,33 +181,6 @@ my_bool my_thread_global_init(void) thd_lib_detected= get_thread_lib(); -#ifdef TARGET_OS_LINUX - /* - BUG#24507: Race conditions inside current NPTL pthread_exit() - implementation. - - To avoid a possible segmentation fault during concurrent - executions of pthread_exit(), a dummy thread is spawned which - initializes internal variables of pthread lib. See bug description - for a full explanation. - - TODO: Remove this code when fixed versions of glibc6 are in common - use. 
- */ - if (thd_lib_detected == THD_LIB_NPTL) - { - pthread_t dummy_thread; - pthread_attr_t dummy_thread_attr; - - pthread_attr_init(&dummy_thread_attr); - pthread_attr_setdetachstate(&dummy_thread_attr, PTHREAD_CREATE_JOINABLE); - - if (pthread_create(&dummy_thread,&dummy_thread_attr, - nptl_pthread_exit_hack_handler, NULL) == 0) - (void)pthread_join(dummy_thread, NULL); - } -#endif /* TARGET_OS_LINUX */ - my_thread_init_common_mutex(); return 0; diff --git a/packaging/rpm-oel/filter-requires.sh b/packaging/rpm-oel/filter-requires.sh index 521eb0ca7d9..3fdf43870fa 100755 --- a/packaging/rpm-oel/filter-requires.sh +++ b/packaging/rpm-oel/filter-requires.sh @@ -2,5 +2,5 @@ # /usr/lib/rpm/perl.req $* | -sed -e '/perl(hostnames)/d' -e '/perl(lib::mtr.*/d' -e '/perl(lib::v1.*/d' -e '/perl(mtr_.*/d' -e '/perl(My::.*/d' +sed -e '/perl(GD)/d' -e '/perl(hostnames)/d' -e '/perl(lib::mtr.*/d' -e '/perl(lib::v1.*/d' -e '/perl(mtr_.*/d' -e '/perl(My::.*/d' diff --git a/packaging/rpm-oel/mysql.spec.in b/packaging/rpm-oel/mysql.spec.in index d28e89b4216..ba57515678c 100644 --- a/packaging/rpm-oel/mysql.spec.in +++ b/packaging/rpm-oel/mysql.spec.in @@ -85,7 +85,7 @@ Name: mysql-%{product_suffix} Summary: A very fast and reliable SQL database server Group: Applications/Databases Version: @VERSION@ -Release: 2%{?commercial:.1}%{?dist} +Release: 4%{?commercial:.1}%{?dist} License: Copyright (c) 2000, @MYSQL_COPYRIGHT_YEAR@, %{mysql_vendor}. All rights reserved. Under %{?license_type} license as shown in the Description field. 
Source0: https://cdn.mysql.com/Downloads/MySQL-@MYSQL_BASE_VERSION@/%{src_dir}.tar.gz URL: http://www.mysql.com/ @@ -118,7 +118,7 @@ BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX) %if 0%{?rhel} > 6 # For rpm => 4.9 only: https://fedoraproject.org/wiki/Packaging:AutoProvidesAndRequiresFiltering -%global __requires_exclude ^perl\\((hostnames|lib::mtr|lib::v1|mtr_|My::) +%global __requires_exclude ^perl\\((GD|hostnames|lib::mtr|lib::v1|mtr_|My::) %global __provides_exclude_from ^(/usr/share/(mysql|mysql-test)/.*|%{_libdir}/mysql/plugin/.*\\.so)$ %else # https://fedoraproject.org/wiki/EPEL:Packaging#Generic_Filtering_on_EPEL6 @@ -166,6 +166,7 @@ Requires: mysql-community-common%{?_isa} = %{version}-%{release} Obsoletes: MySQL-server < %{version}-%{release} Obsoletes: mysql-server < %{version}-%{release} Obsoletes: mariadb-server +Obsoletes: mariadb-galera-server Provides: mysql-server = %{version}-%{release} Provides: mysql-server%{?_isa} = %{version}-%{release} %if 0%{?systemd} @@ -262,6 +263,25 @@ This package contains the MySQL regression test suite for MySQL database server. +%package bench +Summary: MySQL benchmark suite +Group: Applications/Databases +%if 0%{?commercial} +Obsoletes: mysql-community-bench < %{version}-%{release} +Requires: mysql-enterprise-server%{?_isa} = %{version}-%{release} +%else +Requires: mysql-community-server%{?_isa} = %{version}-%{release} +%endif +Obsoletes: mariadb-bench +Obsoletes: community-mysql-bench < %{version}-%{release} +Obsoletes: mysql-bench < %{version}-%{release} +Provides: mysql-bench = %{version}-%{release} +Provides: mysql-bench%{?_isa} = %{version}-%{release} + +%description bench +This package contains the MySQL Benchmark Suite for MySQL database +server. 
+ %package devel Summary: Development header files and libraries for MySQL database client applications Group: Applications/Databases @@ -344,6 +364,7 @@ Requires: mysql-enterprise-common%{?_isa} = %{version}-%{release} Provides: MySQL-embedded%{?_isa} = %{version}-%{release} Requires: mysql-community-common%{?_isa} = %{version}-%{release} %endif +Obsoletes: mariadb-embedded Obsoletes: MySQL-embedded < %{version}-%{release} Obsoletes: mysql-embedded < %{version}-%{release} Provides: mysql-embedded = %{version}-%{release} @@ -372,6 +393,7 @@ Requires: mysql-enterprise-embedded%{?_isa} = %{version}-%{release} Requires: mysql-community-devel%{?_isa} = %{version}-%{release} Requires: mysql-community-embedded%{?_isa} = %{version}-%{release} %endif +Obsoletes: mariadb-embedded-devel Obsoletes: mysql-embedded-devel < %{version}-%{release} Provides: mysql-embedded-devel = %{version}-%{release} Provides: mysql-embedded-devel%{?_isa} = %{version}-%{release} @@ -472,11 +494,13 @@ mkdir debug cmake ../%{src_dir} \ -DBUILD_CONFIG=mysql_release \ -DINSTALL_LAYOUT=RPM \ - -DCMAKE_BUILD_TYPE=Debug %{?el7:-DENABLE_DTRACE=OFF} \ + -DCMAKE_BUILD_TYPE=Debug \ + -DENABLE_DTRACE=OFF \ -DCMAKE_C_FLAGS="$optflags" \ -DCMAKE_CXX_FLAGS="$optflags" \ -DINSTALL_LIBDIR="%{_lib}/mysql" \ -DINSTALL_PLUGINDIR="%{_lib}/mysql/plugin" \ + -DINSTALL_SQLBENCHDIR=share \ -DMYSQL_UNIX_ADDR="%{mysqldatadir}/mysql.sock" \ -DFEATURE_SET="%{feature_set}" \ -DWITH_EMBEDDED_SERVER=1 \ @@ -495,11 +519,13 @@ mkdir release cmake ../%{src_dir} \ -DBUILD_CONFIG=mysql_release \ -DINSTALL_LAYOUT=RPM \ - -DCMAKE_BUILD_TYPE=RelWithDebInfo %{?el7:-DENABLE_DTRACE=OFF} \ + -DCMAKE_BUILD_TYPE=RelWithDebInfo \ + -DENABLE_DTRACE=OFF \ -DCMAKE_C_FLAGS="%{optflags}" \ -DCMAKE_CXX_FLAGS="%{optflags}" \ -DINSTALL_LIBDIR="%{_lib}/mysql" \ -DINSTALL_PLUGINDIR="%{_lib}/mysql/plugin" \ + -DINSTALL_SQLBENCHDIR=share \ -DMYSQL_UNIX_ADDR="%{mysqldatadir}/mysql.sock" \ -DFEATURE_SET="%{feature_set}" \ -DWITH_EMBEDDED_SERVER=1 \ @@ 
-862,6 +888,10 @@ fi %attr(644, root, root) %{_mandir}/man1/mysql_client_test_embedded.1* %attr(644, root, root) %{_mandir}/man1/mysqltest_embedded.1* +%files bench +%defattr(-, root, root, -) +%{_datadir}/sql-bench + %files embedded %defattr(-, root, root, -) %dir %attr(755, root, root) %{_libdir}/mysql @@ -881,6 +911,19 @@ fi %endif %changelog +* Tue Jul 08 2014 Balasubramanian Kandasamy <balasubramanian.kandasamy@oracle.com> - 5.5.39-4 +- Remove perl(GD) and dtrace dependencies + +* Tue Jul 01 2014 Bjorn Munch <bjorn.munch@oracle.com> - 5.5.39-3 +- Disable dtrace, as it fails on OEL6 boxes with Oracle dtrace installed + +* Thu Jun 26 2014 Balasubramanian Kandasamy <balasubramanian.kandasamy@oracle.com> - 5.5.39-2 +- Resolve embedded-devel conflict issue + +* Wed Jun 25 2014 Balasubramanian Kandasamy <balasubramanian.kandasamy@oracle.com> - 5.5.39-1 +- Add bench package +- Enable dtrace + * Sun May 11 2014 Balasubramanian Kandasamy <balasubramanian.kandasamy@oracle.com> - 5.5.38-2 - Increment release version to resolve upgrade conflict issue diff --git a/packaging/rpm-uln/CMakeLists.txt b/packaging/rpm-uln/CMakeLists.txt deleted file mode 100644 index c8f13379697..00000000000 --- a/packaging/rpm-uln/CMakeLists.txt +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - -IF(UNIX) - SET(prefix ${CMAKE_INSTALL_PREFIX}) - - SET(SPECFILENAME "mysql.${VERSION}.spec") - IF("${VERSION}" MATCHES "-ndb-") - STRING(REGEX REPLACE "^.*-ndb-" "" NDBVERSION "${VERSION}") - SET(SPECFILENAME "mysql-cluster-${NDBVERSION}.spec") - ENDIF() - - # Left in current directory, to be taken during build - CONFIGURE_FILE(mysql.spec.sh ${CMAKE_CURRENT_BINARY_DIR}/${SPECFILENAME} @ONLY) - - FOREACH(ulnfile filter-requires-mysql.sh generate-tarball.sh my.cnf my_config.h - mysql-5.5-errno.patch mysql-5.5-fix-tests.patch mysql-5.5-libdir.patch - mysql-5.5-mtr1.patch mysql-5.5-stack-guard.patch mysql-5.5-testing.patch - mysql-chain-certs.patch mysql-embedded-check.c mysql-expired-certs.patch - mysql.init mysql-install-test.patch mysql-strmov.patch scriptstub.c - README.mysql-docs) - CONFIGURE_FILE(${ulnfile} ${CMAKE_CURRENT_BINARY_DIR}/${ulnfile} COPYONLY) - ENDFOREACH() -ENDIF() - diff --git a/packaging/rpm-uln/README-ULN b/packaging/rpm-uln/README-ULN deleted file mode 100644 index 8ae44a18605..00000000000 --- a/packaging/rpm-uln/README-ULN +++ /dev/null @@ -1,15 +0,0 @@ -In order to have RPMs of MySQL which are distributed via ULN for Oracle Linux -to be as closely compatible to such RPMs built and distributed by RedHat, -this directory contains additional files which originated at RedHat -and are used only for such RPMs intended for distribution via ULN. - -Especially, this directory contains the spec file used to build these RPMs, -named "mysql.spec". Please regard the following note: - - You are receiving a copy of the Red Hat spec file. - The terms of the Oracle license do NOT apply to the Red Hat spec file; - it is licensed under the - GNU GENERAL PUBLIC LICENSE Version 2, June 1991 - separately from the Oracle programs you receive. 
- - diff --git a/packaging/rpm-uln/README.mysql-docs b/packaging/rpm-uln/README.mysql-docs deleted file mode 100644 index dd894a7b9c0..00000000000 --- a/packaging/rpm-uln/README.mysql-docs +++ /dev/null @@ -1,4 +0,0 @@ -The official MySQL documentation is not freely redistributable, so we cannot -include it in RHEL or Fedora. You can find it on-line at - -http://dev.mysql.com/doc/ diff --git a/packaging/rpm-uln/filter-requires-mysql.sh b/packaging/rpm-uln/filter-requires-mysql.sh deleted file mode 100755 index d435062b8dc..00000000000 --- a/packaging/rpm-uln/filter-requires-mysql.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh - -/usr/lib/rpm/perl.req $* | grep -v -e "perl(th" -e "perl(lib::mtr" -e "perl(mtr" diff --git a/packaging/rpm-uln/generate-tarball.sh b/packaging/rpm-uln/generate-tarball.sh deleted file mode 100755 index 2ff4bff2349..00000000000 --- a/packaging/rpm-uln/generate-tarball.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/sh - -VERSION=$1 - -rm -rf mysql-$VERSION - -tar xfz mysql-$VERSION.tar.gz || exit 1 - -rm mysql-$VERSION/Docs/mysql.info - -tar cfz mysql-$VERSION-nodocs.tar.gz mysql-$VERSION || exit 1 - -rm -rf mysql-$VERSION - -exit 0 diff --git a/packaging/rpm-uln/my.cnf b/packaging/rpm-uln/my.cnf deleted file mode 100644 index fae0fa276e1..00000000000 --- a/packaging/rpm-uln/my.cnf +++ /dev/null @@ -1,10 +0,0 @@ -[mysqld] -datadir=/var/lib/mysql -socket=/var/lib/mysql/mysql.sock -user=mysql -# Disabling symbolic-links is recommended to prevent assorted security risks -symbolic-links=0 - -[mysqld_safe] -log-error=/var/log/mysqld.log -pid-file=/var/run/mysqld/mysqld.pid diff --git a/packaging/rpm-uln/my_config.h b/packaging/rpm-uln/my_config.h deleted file mode 100644 index 435a126ac97..00000000000 --- a/packaging/rpm-uln/my_config.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Kluge to support multilib installation of both 32- and 64-bit RPMS: - * we need to arrange that header files that appear in both RPMs are - * identical. 
Hence, this file is architecture-independent and calls - * in an arch-dependent file that will appear in just one RPM. - * - * To avoid breaking arches not explicitly supported by Red Hat, we - * use this indirection file *only* on known multilib arches. - * - * Note: this may well fail if user tries to use gcc's -I- option. - * But that option is deprecated anyway. - */ -#if defined(__x86_64__) -#include "my_config_x86_64.h" -#elif defined(__i386__) -#include "my_config_i386.h" -#elif defined(__ppc64__) || defined(__powerpc64__) -#include "my_config_ppc64.h" -#elif defined(__ppc__) || defined(__powerpc__) -#include "my_config_ppc.h" -#elif defined(__s390x__) -#include "my_config_s390x.h" -#elif defined(__s390__) -#include "my_config_s390.h" -#elif defined(__sparc__) && defined(__arch64__) -#include "my_config_sparc64.h" -#elif defined(__sparc__) -#include "my_config_sparc.h" -#endif diff --git a/packaging/rpm-uln/mysql-5.5-errno.patch b/packaging/rpm-uln/mysql-5.5-errno.patch deleted file mode 100644 index 033e5195973..00000000000 --- a/packaging/rpm-uln/mysql-5.5-errno.patch +++ /dev/null @@ -1,21 +0,0 @@ -"extern int errno" is just a really bad idea. - - -diff -Naur mysql-5.1.32.orig/include/my_sys.h mysql-5.1.32/include/my_sys.h ---- mysql-5.1.32.orig/include/my_sys.h 2009-02-13 19:52:19.000000000 -0500 -+++ mysql-5.1.32/include/my_sys.h 2009-03-04 18:08:40.000000000 -0500 -@@ -199,13 +199,8 @@ - #define my_afree(PTR) my_free(PTR) - #endif /* HAVE_ALLOCA */ - --#ifndef errno /* did we already get it? 
*/ --#ifdef HAVE_ERRNO_AS_DEFINE - #include <errno.h> /* errno is a define */ --#else --extern int errno; /* declare errno */ --#endif --#endif /* #ifndef errno */ -+ - extern char *home_dir; /* Home directory for user */ - extern const char *my_progname; /* program-name (printed in errors) */ - extern char curr_dir[]; /* Current directory for user */ diff --git a/packaging/rpm-uln/mysql-5.5-fix-tests.patch b/packaging/rpm-uln/mysql-5.5-fix-tests.patch deleted file mode 100644 index a1ab7a82210..00000000000 --- a/packaging/rpm-uln/mysql-5.5-fix-tests.patch +++ /dev/null @@ -1,34 +0,0 @@ -Adapt tests (where needed) to RedHat conventions. - -1) The RedHat convention uses the package name "mysql*" whereas upstream uses "MySQL*". - Test "file_contents" constructs path names and needs to be adapted. - -=== modified file 'mysql-test/t/file_contents.test' ---- mysql-5.5.17-orig/mysql-test/t/file_contents.test 2011-10-10 12:03:29 +0000 -+++ mysql-5.5.17/mysql-test/t/file_contents.test 2011-11-16 18:07:55 +0000 -@@ -17,20 +17,20 @@ if ($dir_bin =~ m|/usr/|) { - $dir_docs =~ s|/lib|/share/doc|; - if(-d "$dir_docs/packages") { - # SuSE: "packages/" in the documentation path -- $dir_docs = glob "$dir_docs/packages/MySQL-server*"; -+ $dir_docs = glob "$dir_docs/packages/mysql-server*"; - } else { - # RedHat: version number in directory name -- $dir_docs = glob "$dir_docs/MySQL-server*"; -+ $dir_docs = glob "$dir_docs/mysql-server*"; - } - } elsif ($dir_bin =~ m|/usr$|) { - # RPM build during development - $dir_docs = "$dir_bin/share/doc"; - if(-d "$dir_docs/packages") { - # SuSE: "packages/" in the documentation path -- $dir_docs = glob "$dir_docs/packages/MySQL-server*"; -+ $dir_docs = glob "$dir_docs/packages/mysql-server*"; - } else { - # RedHat: version number in directory name -- $dir_docs = glob "$dir_docs/MySQL-server*"; -+ $dir_docs = glob "$dir_docs/mysql-server*"; - } - } else { - # tar.gz package, Windows, or developer work (in BZR) - diff --git 
a/packaging/rpm-uln/mysql-5.5-libdir.patch b/packaging/rpm-uln/mysql-5.5-libdir.patch deleted file mode 100644 index 2ab3e9eec27..00000000000 --- a/packaging/rpm-uln/mysql-5.5-libdir.patch +++ /dev/null @@ -1,28 +0,0 @@ -The RPMs built by MySQL AB (-> Sun -> Oracle) put the libraries into "/usr/lib". -Those built by RedHat put them into "/usr/lib/mysql". -This patch is to modify the cmake files to follow the RedHat convention. -Similar, the server is now in "/usr/libexec" (formerly "/usr/sbin"). - - -diff -Naur mysql-5.5.17.orig/cmake/install_layout.cmake mysql-5.5.17/cmake/install_layout.cmake ---- mysql-5.5.17.orig/cmake/install_layout.cmake 2011-06-30 15:46:53 +0000 -+++ mysql-5.5.17/cmake/install_layout.cmake 2011-10-27 16:40:10 +0000 -@@ -140,14 +140,14 @@ SET(INSTALL_SBINDIR_RPM - # be applied at build time via "rpmbuild". - # - SET(INSTALL_BINDIR_RPM "bin") --SET(INSTALL_SBINDIR_RPM "sbin") -+SET(INSTALL_SBINDIR_RPM "libexec") - SET(INSTALL_SCRIPTDIR_RPM "bin") - # - IF(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64") -- SET(INSTALL_LIBDIR_RPM "lib64") -+ SET(INSTALL_LIBDIR_RPM "lib64/mysql") - SET(INSTALL_PLUGINDIR_RPM "lib64/mysql/plugin") - ELSE() -- SET(INSTALL_LIBDIR_RPM "lib") -+ SET(INSTALL_LIBDIR_RPM "lib/mysql") - SET(INSTALL_PLUGINDIR_RPM "lib/mysql/plugin") - ENDIF() - # - diff --git a/packaging/rpm-uln/mysql-5.5-mtr1.patch b/packaging/rpm-uln/mysql-5.5-mtr1.patch deleted file mode 100644 index 7a7dc85f16c..00000000000 --- a/packaging/rpm-uln/mysql-5.5-mtr1.patch +++ /dev/null @@ -1,25 +0,0 @@ -Drop support for version 1 of "mysql-test-run.pl" from the RPMs: - -1) The auto-generation of Perl dependencies will mishandle that code, - probably because its run directory differs from its storage location. -2) It does not provide several variables which are used in tests of MySQL 5.5 - -If you really need it, take it from the source tarball. 
- -=== modified file 'mysql-test/mysql-test-run.pl' ---- mysql-5.5.17-orig/mysql-test/mysql-test-run.pl 2011-10-03 11:16:40 +0000 -+++ mysql-5.5.17/mysql-test/mysql-test-run.pl 2011-11-16 19:06:38 +0000 -@@ -58,10 +58,9 @@ BEGIN { - if ( $version == 1 ) - { - print "=======================================================\n"; -- print " WARNING: Using mysql-test-run.pl version 1! \n"; -+ print " ERROR: Support for version 1 is dropped in this distribution! \n"; - print "=======================================================\n"; -- # Should use exec() here on *nix but this appears not to work on Windows -- exit(system($^X, "lib/v1/mysql-test-run.pl", @ARGV) >> 8); -+ exit(1); - } - elsif ( $version == 2 ) - { - diff --git a/packaging/rpm-uln/mysql-5.5-stack-guard.patch b/packaging/rpm-uln/mysql-5.5-stack-guard.patch deleted file mode 100644 index b2624d982de..00000000000 --- a/packaging/rpm-uln/mysql-5.5-stack-guard.patch +++ /dev/null @@ -1,140 +0,0 @@ -mysql is not accounting for the "guard page" when setting thread stack size -requests. This is fatal on PPC systems, which may use guard pages as large -as 64K. This patch also documents the IA64 situation a bit better. - -Note: there are quite a few other setstacksize calls besides the two in -mysqld.cc; is it important to fix any of the others? 
- -Filed upstream at http://bugs.mysql.com/bug.php?id=35019 - - -diff -Naur mysql-5.1.30.orig/sql/mysqld.cc mysql-5.1.30/sql/mysqld.cc ---- mysql-5.1.30.orig/sql/mysqld.cc 2008-11-14 11:37:13.000000000 -0500 -+++ mysql-5.1.30/sql/mysqld.cc 2009-01-13 12:08:35.000000000 -0500 -@@ -2653,6 +2653,70 @@ - } - - -+/* pthread_attr_setstacksize without so much platform-dependency */ -+/* returns the actual stack size if possible */ -+static size_t my_setstacksize(pthread_attr_t *attr, size_t stacksize) -+{ -+ size_t guard_size = 0; -+ -+#if defined(__ia64__) || defined(__ia64) -+ /* -+ On IA64, half of the requested stack size is used for "normal stack" -+ and half for "register stack". The space measured by check_stack_overrun -+ is the "normal stack", so double the request to make sure we have the -+ caller-expected amount of normal stack. -+ -+ NOTE: there is no guarantee that the register stack can't grow faster -+ than normal stack, so it's very unclear that we won't dump core due to -+ stack overrun despite check_stack_overrun's efforts. Experimentation -+ shows that in the execution_constants test, the register stack grows -+ less than half as fast as normal stack, but perhaps other scenarios are -+ less forgiving. If it turns out that more space is needed for the -+ register stack, that could be forced (rather inefficiently) by using a -+ multiplier higher than 2 here. -+ */ -+ stacksize *= 2; -+#endif -+ -+ /* -+ On many machines, the "guard space" is subtracted from the requested -+ stack size, and that space is quite large on some platforms. So add -+ it to our request, if we can find out what it is. 
-+ -+ FIXME: autoconfiscate use of pthread_attr_getguardsize -+ */ -+ if (pthread_attr_getguardsize(attr, &guard_size)) -+ guard_size = 0; /* if can't find it out, treat as 0 */ -+ -+ pthread_attr_setstacksize(attr, stacksize + guard_size); -+ -+ /* Retrieve actual stack size if possible */ -+#ifdef HAVE_PTHREAD_ATTR_GETSTACKSIZE -+ { -+ size_t real_stack_size= 0; -+ /* We must ignore real_stack_size = 0 as Solaris 2.9 can return 0 here */ -+ if (pthread_attr_getstacksize(attr, &real_stack_size) == 0 && -+ real_stack_size > guard_size) -+ { -+ real_stack_size -= guard_size; -+ if (real_stack_size < stacksize) -+ { -+ if (global_system_variables.log_warnings) -+ sql_print_warning("Asked for %ld thread stack, but got %ld", -+ (long) stacksize, (long) real_stack_size); -+ stacksize= real_stack_size; -+ } -+ } -+ } -+#endif -+ -+#if defined(__ia64__) || defined(__ia64) -+ stacksize /= 2; -+#endif -+ return stacksize; -+} -+ -+ - static void start_signal_handler(void) - { - int error; -@@ -2663,15 +2727,7 @@ - #if !defined(HAVE_DEC_3_2_THREADS) - pthread_attr_setscope(&thr_attr,PTHREAD_SCOPE_SYSTEM); - (void) pthread_attr_setdetachstate(&thr_attr,PTHREAD_CREATE_DETACHED); --#if defined(__ia64__) || defined(__ia64) -- /* -- Peculiar things with ia64 platforms - it seems we only have half the -- stack size in reality, so we have to double it here -- */ -- pthread_attr_setstacksize(&thr_attr,my_thread_stack_size*2); --#else -- pthread_attr_setstacksize(&thr_attr,my_thread_stack_size); --#endif -+ (void) my_setstacksize(&thr_attr,my_thread_stack_size); - #endif - - mysql_mutex_lock(&LOCK_thread_count); -@@ -4445,37 +4501,7 @@ - unireg_abort(1); // Will do exit - - init_signals(); --#if defined(__ia64__) || defined(__ia64) -- /* -- Peculiar things with ia64 platforms - it seems we only have half the -- stack size in reality, so we have to double it here -- */ -- pthread_attr_setstacksize(&connection_attrib,my_thread_stack_size*2); --#else -- 
pthread_attr_setstacksize(&connection_attrib,my_thread_stack_size); --#endif --#ifdef HAVE_PTHREAD_ATTR_GETSTACKSIZE -- { -- /* Retrieve used stack size; Needed for checking stack overflows */ -- size_t stack_size= 0; -- pthread_attr_getstacksize(&connection_attrib, &stack_size); --#if defined(__ia64__) || defined(__ia64) -- stack_size/= 2; --#endif -- /* We must check if stack_size = 0 as Solaris 2.9 can return 0 here */ -- if (stack_size && stack_size < my_thread_stack_size) -- { -- if (global_system_variables.log_warnings) -- sql_print_warning("Asked for %lu thread stack, but got %ld", -- my_thread_stack_size, (long) stack_size); --#if defined(__ia64__) || defined(__ia64) -- my_thread_stack_size= stack_size*2; --#else -- my_thread_stack_size= stack_size; --#endif -- } -- } --#endif -+ my_thread_stack_size = my_setstacksize(&connection_attrib,my_thread_stack_size); - - (void) thr_setconcurrency(concurrency); // 10 by default - diff --git a/packaging/rpm-uln/mysql-5.5-testing.patch b/packaging/rpm-uln/mysql-5.5-testing.patch deleted file mode 100644 index 74387135346..00000000000 --- a/packaging/rpm-uln/mysql-5.5-testing.patch +++ /dev/null @@ -1,23 +0,0 @@ -Hack the top-level Makefile to enable the openssl regression tests. -(Why doesn't this happen automatically given the configure option??) - -Also, increase the overall timeout for the regression tests to 12 hours, -because on a slow or heavily-loaded build machine sometimes the default of -5 hours isn't enough. (This has been demonstrated to fail in mass-rebuild -scenarios, which aren't that uncommon for Fedora.) Similarly increase the -per-testcase timeout to 30 minutes, since the default of 15 hasn't got a -great deal of headroom either. 
- - -diff -Naur mysql-5.1.32.orig/Makefile.am mysql-5.1.32/Makefile.am ---- mysql-5.1.32.orig/Makefile.am 2009-02-13 19:51:56.000000000 -0500 -+++ mysql-5.1.32/Makefile.am 2009-03-04 18:12:36.000000000 -0500 -@@ -98,7 +98,7 @@ - - test-ns: - cd mysql-test ; \ -- @PERL@ ./mysql-test-run.pl $(force) $(mem) --mysqld=--binlog-format=mixed -+ @PERL@ ./mysql-test-run.pl $(force) $(mem) --ssl --mysqld=--binlog-format=mixed --suite-timeout=720 --testcase-timeout=30 - - test-binlog-statement: - cd mysql-test ; \ diff --git a/packaging/rpm-uln/mysql-chain-certs.patch b/packaging/rpm-uln/mysql-chain-certs.patch deleted file mode 100644 index 4e26af16cb0..00000000000 --- a/packaging/rpm-uln/mysql-chain-certs.patch +++ /dev/null @@ -1,45 +0,0 @@ -Fix things so that chains of certificates work in the server and client -certificate files. - -This only really works for OpenSSL-based builds, as yassl is unable to read -multiple certificates from a file. The patch below to yassl/src/ssl.cpp -doesn't fix that, but just arranges that the viosslfactories.c patch won't -have any ill effects in a yassl build. Since we don't use yassl in Red Hat/ -Fedora builds, I'm not feeling motivated to try to fix yassl for this. - -See RH bug #598656. 
Filed upstream at http://bugs.mysql.com/bug.php?id=54158 - - === - -Joerg Bruehe, MySQL Build Team at Oracle: First patch adapted to code changes in MySQL 5.5 - - -diff -Naur mysql-5.5.29.orig/vio/viosslfactories.c mysql-5.5.29/vio/viosslfactories.c ---- mysql-5.5.29.orig/vio/viosslfactories.c 2010-05-06 11:28:07.000000000 -0400 -+++ mysql-5.5.29/vio/viosslfactories.c 2010-05-26 23:23:46.000000000 -0400 -@@ -106,7 +106,7 @@ - key_file= cert_file; - - if (cert_file && -- SSL_CTX_use_certificate_file(ctx, cert_file, SSL_FILETYPE_PEM) <= 0) -+ SSL_CTX_use_certificate_chain_file(ctx, cert_file) <= 0) - { - *error= SSL_INITERR_CERT; - DBUG_PRINT("error",("%s from file '%s'", sslGetErrString(*error), cert_file)); -diff -Naur mysql-5.1.47.orig/extra/yassl/src/ssl.cpp mysql-5.1.47/extra/yassl/src/ssl.cpp ---- mysql-5.1.47.orig/extra/yassl/src/ssl.cpp 2010-05-06 11:24:26.000000000 -0400 -+++ mysql-5.1.47/extra/yassl/src/ssl.cpp 2010-05-26 23:29:13.000000000 -0400 -@@ -1606,10 +1606,10 @@ - } - - -- int SSL_CTX_use_certificate_chain_file(SSL_CTX*, const char*) -+ int SSL_CTX_use_certificate_chain_file(SSL_CTX* ctx, const char* file) - { -- // TDOD: -- return SSL_SUCCESS; -+ // For the moment, treat like use_certificate_file -+ return read_file(ctx, file, SSL_FILETYPE_PEM, Cert); - } - - diff --git a/packaging/rpm-uln/mysql-embedded-check.c b/packaging/rpm-uln/mysql-embedded-check.c deleted file mode 100644 index 8bf8ca53dad..00000000000 --- a/packaging/rpm-uln/mysql-embedded-check.c +++ /dev/null @@ -1,26 +0,0 @@ -/* simple test program to see if we can link the embedded server library */ - -#include <stdio.h> -#include <stdlib.h> -#include <stdarg.h> - -#include "mysql.h" - -MYSQL *mysql; - -static char *server_options[] = \ - { "mysql_test", "--defaults-file=my.cnf", NULL }; -int num_elements = (sizeof(server_options) / sizeof(char *)) - 1; - -static char *server_groups[] = { "libmysqld_server", - "libmysqld_client", NULL }; - -int main(int argc, char **argv) -{ - 
mysql_library_init(num_elements, server_options, server_groups); - mysql = mysql_init(NULL); - mysql_close(mysql); - mysql_library_end(); - - return 0; -} diff --git a/packaging/rpm-uln/mysql-expired-certs.patch b/packaging/rpm-uln/mysql-expired-certs.patch deleted file mode 100644 index acd3a78cce7..00000000000 --- a/packaging/rpm-uln/mysql-expired-certs.patch +++ /dev/null @@ -1,555 +0,0 @@ -Upstream insists on generating SSL testing certificates with relatively short -lifespan, which has repeatedly caused problems (ie, one day the regression -tests suddenly stop working). Replace them with certificates with 20-year -lifespan. We should periodically regenerate these, too, but at least not -very often. - - -diff -Naur mysql-5.1.50.orig/mysql-test/std_data/cacert.pem mysql-5.1.50/mysql-test/std_data/cacert.pem ---- mysql-5.1.50.orig/mysql-test/std_data/cacert.pem 2010-08-03 13:55:04.000000000 -0400 -+++ mysql-5.1.50/mysql-test/std_data/cacert.pem 2010-08-27 23:42:05.751428144 -0400 -@@ -1,17 +1,22 @@ - -----BEGIN CERTIFICATE----- --MIICrTCCAhagAwIBAgIJAMI7xZKjhrDbMA0GCSqGSIb3DQEBBAUAMEQxCzAJBgNV -+MIIDsjCCApqgAwIBAgIJAL5YrUwfPSWVMA0GCSqGSIb3DQEBBQUAMEQxCzAJBgNV - BAYTAlNFMRAwDgYDVQQIEwdVcHBzYWxhMRAwDgYDVQQHEwdVcHBzYWxhMREwDwYD --VQQKEwhNeVNRTCBBQjAeFw0xMDAxMjkxMTQ3MTBaFw0xNTAxMjgxMTQ3MTBaMEQx -+VQQKEwhNeVNRTCBBQjAeFw0xMDAxMjkwNTU5NTNaFw0xNTAxMjgwNTU5NTNaMEQx - CzAJBgNVBAYTAlNFMRAwDgYDVQQIEwdVcHBzYWxhMRAwDgYDVQQHEwdVcHBzYWxh --MREwDwYDVQQKEwhNeVNRTCBBQjCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA --wQYsOEfrN4ESP3FjsI8cghE+tZVuyK2gck61lwieVxjgFMtBd65mI5a1y9pmlOI1 --yM4SB2Ppqcuw7/e1CdV1y7lvHrGNt5yqEHbN4QX1gvsN8TQauP/2WILturk4R4Hq --rKg0ZySu7f1Xhl0ed9a48LpaEHD17IcxWEGMMJwAxF0CAwEAAaOBpjCBozAMBgNV --HRMEBTADAQH/MB0GA1UdDgQWBBSvktYQ0ahLnyxyVKqty+WpBbBrDTB0BgNVHSME --bTBrgBSvktYQ0ahLnyxyVKqty+WpBbBrDaFIpEYwRDELMAkGA1UEBhMCU0UxEDAO --BgNVBAgTB1VwcHNhbGExEDAOBgNVBAcTB1VwcHNhbGExETAPBgNVBAoTCE15U1FM --IEFCggkAwjvFkqOGsNswDQYJKoZIhvcNAQEEBQADgYEAdKN1PjwMHAKG2Ww1145g 
--JQGBnKxSFOUaoSvkBi/4ntTM+ysnViWh7WvxyWjR9zU9arfr7aqsDeQxm0XDOqzj --AQ/cQIla2/Li8tXyfc06bisH/IHRaSc2zWqioTKbEwMdVOdrvq4a8V8ic3xYyIWn --7F4WeS07J8LKardSvM0+hOA= -+MREwDwYDVQQKEwhNeVNRTCBBQjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC -+ggEBAL6kNN4peX7uhK9rb06W/QbPEpVuejmdWdl2PqMshP/eSuXXw7kwVgfpxx9R -+vC000CKQQSG9MCoZjtqPnFRsetmWLZgApRpEalGXTXJqq9sEbCfoFizg94U8G7d2 -+u5XJjLVmcG34ru36KoBgVx1zeH1puBAf8dOzrE4L7Y+ZQBFzFohjh8C2LqWC4nM5 -+qsLmOkDWMipGqYU5DvkKjIbTbwTyRNRgZHWSPfVDDPUIUOsY4BGUp2DpgeGY9aEv -+lIs57Ev9JqlIUCV65lOhhDkG+xwmkHKHA+ECEU9cALI8+uXbh48MB9XpMOuk408X -+/lX89aZwD0/G9kmObVGnE2G+H5UCAwEAAaOBpjCBozAdBgNVHQ4EFgQUsft+d7VA -+jWgRftkR5cPG2k2sUbAwdAYDVR0jBG0wa4AUsft+d7VAjWgRftkR5cPG2k2sUbCh -+SKRGMEQxCzAJBgNVBAYTAlNFMRAwDgYDVQQIEwdVcHBzYWxhMRAwDgYDVQQHEwdV -+cHBzYWxhMREwDwYDVQQKEwhNeVNRTCBBQoIJAL5YrUwfPSWVMAwGA1UdEwQFMAMB -+Af8wDQYJKoZIhvcNAQEFBQADggEBALRUOAmdL8R8sl1y8kiEiFgDatdXK5RDqWai -+8yZChfmwTIToHhmQsOEshJe2e8hky3huUj+33VyXjINoMbebIwMuXPwEkbJal8RZ -+nSJmF0jN1Qz7J/jFffwK9xmejWZJx49Kt2+Qwrwp6kDeq9TLFqQOoVczgyJPYsTL -+NAOib5WqTud3XWvCwxrhqmWu7JZq6sp1fomP/uunprb8y2miWfLESZN2mKAhm44Q -+Lws867LT8v2lskEjq2dT1LutD5+R66XcdjgSr0uDziDs64jZwCD6ea94hVFM7ej0 -+ZOXYeSEZJ56FjUxu632e9fY8NyMh30yKjjmQf1mM9PuGJvdvsWU= - -----END CERTIFICATE----- -diff -Naur mysql-5.1.50.orig/mysql-test/std_data/client-cert.pem mysql-5.1.50/mysql-test/std_data/client-cert.pem ---- mysql-5.1.50.orig/mysql-test/std_data/client-cert.pem 2010-08-03 13:55:04.000000000 -0400 -+++ mysql-5.1.50/mysql-test/std_data/client-cert.pem 2010-08-27 23:42:05.752428395 -0400 -@@ -1,46 +1,69 @@ - Certificate: - Data: -- Version: 1 (0x0) -- Serial Number: 1048577 (0x100001) -- Signature Algorithm: md5WithRSAEncryption -+ Version: 3 (0x2) -+ Serial Number: 6 (0x6) -+ Signature Algorithm: sha1WithRSAEncryption - Issuer: C=SE, ST=Uppsala, L=Uppsala, O=MySQL AB - Validity -- Not Before: Jan 29 11:50:22 2010 GMT -- Not After : Jan 28 11:50:22 2015 GMT -+ Not Before: Feb 20 03:03:26 2010 GMT -+ Not After : Sep 3 03:03:26 
2030 GMT - Subject: C=SE, ST=Uppsala, O=MySQL AB - Subject Public Key Info: - Public Key Algorithm: rsaEncryption -- Public-Key: (1024 bit) -- Modulus: -- 00:cc:9a:37:49:13:66:dc:cf:e3:0b:13:a1:23:ed: -- 78:db:4e:bd:11:f6:8c:0d:76:f9:a3:32:56:9a:f8: -- a1:21:6a:55:4e:4d:3f:e6:67:9d:26:99:b2:cd:a4: -- 9a:d2:2b:59:5c:d7:8a:d3:60:68:f8:18:bd:c5:be: -- 15:e1:2a:3c:a3:d4:61:cb:f5:11:94:17:81:81:f7: -- 87:8c:f6:6a:d2:ee:d8:e6:77:f6:62:66:4d:2e:16: -- 8d:08:81:4a:c9:c6:4b:31:e5:b9:c7:8a:84:96:48: -- a7:47:8c:0d:26:90:56:4e:e6:a5:6e:8c:b3:f2:9f: -- fc:3d:78:9b:49:6e:86:83:77 -+ RSA Public Key: (1024 bit) -+ Modulus (1024 bit): -+ 00:c2:e7:20:cf:89:59:2f:67:cb:4c:9f:e8:11:f2: -+ 23:e5:f1:b1:ee:3f:66:5f:c3:f5:fd:1e:31:ee:8f: -+ 4c:2a:bd:c0:4a:a5:9f:c8:44:d5:77:8f:15:1b:4d: -+ 78:6e:b2:a2:48:a5:24:33:05:40:02:b3:c1:87:8d: -+ 59:3c:1a:07:aa:86:f0:04:e1:9c:20:4b:22:32:c4: -+ 51:9e:40:e4:31:c3:57:f5:98:bf:2e:b1:fd:2c:56: -+ bf:49:d9:9b:e7:17:cc:95:5f:b5:08:19:5e:9d:df: -+ 65:22:39:2c:48:fb:69:96:31:7a:35:4d:de:60:b4: -+ c1:60:19:5f:96:56:7e:55:19 - Exponent: 65537 (0x10001) -- Signature Algorithm: md5WithRSAEncryption -- 5e:1f:a3:53:5f:24:13:1c:f8:28:32:b0:7f:69:69:f3:0e:c0: -- 34:87:10:03:7d:da:15:8b:bd:19:b8:1a:56:31:e7:85:49:81: -- c9:7f:45:20:74:3e:89:c0:e0:26:84:51:cc:04:16:ce:69:99: -- 01:e1:26:99:b3:e3:f5:bd:ec:5f:a0:84:e4:38:da:75:78:7b: -- 89:9c:d2:cd:60:95:20:ba:8e:e3:7c:e6:df:76:3a:7c:89:77: -- 02:94:86:11:3a:c4:61:7d:6f:71:83:21:8a:17:fb:17:e2:ee: -- 02:6b:61:c1:b4:52:63:d7:d8:46:b2:c5:9c:6f:38:91:8a:35: -- 32:0b -+ X509v3 extensions: -+ X509v3 Basic Constraints: -+ CA:FALSE -+ X509v3 Subject Key Identifier: -+ 8D:10:67:91:33:76:9C:02:E5:78:5D:D8:C5:EF:25:96:B2:D7:FA:1F -+ X509v3 Authority Key Identifier: -+ keyid:B1:FB:7E:77:B5:40:8D:68:11:7E:D9:11:E5:C3:C6:DA:4D:AC:51:B0 -+ DirName:/C=SE/ST=Uppsala/L=Uppsala/O=MySQL AB -+ serial:BE:58:AD:4C:1F:3D:25:95 -+ -+ Signature Algorithm: sha1WithRSAEncryption -+ a9:88:10:3e:5d:2a:47:29:c8:03:27:7a:31:5a:8e:10:03:bc: -+ 
b5:4e:37:1d:12:7b:eb:5f:50:71:70:b1:a3:8e:93:0e:77:17: -+ 6c:47:b6:c9:a4:4d:2a:c4:38:f0:61:55:b2:7f:28:ba:06:79: -+ ee:67:11:7d:d4:c9:7f:0a:18:c8:c1:cb:d0:2c:f9:63:0f:bb: -+ 45:ca:de:ea:bb:ac:00:01:52:48:36:2b:07:2b:c8:46:c7:b1: -+ 21:81:bd:77:39:e7:4c:39:aa:bd:ac:60:d8:a7:bf:cf:14:98: -+ 4a:0b:a1:40:55:06:8d:6f:35:a9:39:a0:71:a9:97:ba:7c:73: -+ 3c:41:ba:c5:1c:11:4b:2b:43:1d:2d:ba:7b:5f:14:b5:3d:64: -+ 62:15:36:b4:16:bd:78:c8:43:8d:f9:1c:a5:d2:ac:a1:58:74: -+ e1:99:de:ad:04:19:43:a8:bd:0a:fd:19:9b:50:44:46:6d:18: -+ 55:4d:bf:b4:5b:a4:93:62:c7:64:91:6c:54:34:d1:f8:f3:ff: -+ 12:6d:5f:85:e7:35:9e:5c:42:81:5e:fb:c8:bb:44:51:98:b2: -+ ef:1b:9f:5a:22:77:28:7d:da:fb:08:c2:94:9a:0f:42:08:93: -+ 54:10:1e:ad:f2:4f:fc:62:98:51:e9:9b:b9:3a:93:d9:e4:1f: -+ 1d:c4:76:d0 - -----BEGIN CERTIFICATE----- --MIIB5zCCAVACAxAAATANBgkqhkiG9w0BAQQFADBEMQswCQYDVQQGEwJTRTEQMA4G --A1UECBMHVXBwc2FsYTEQMA4GA1UEBxMHVXBwc2FsYTERMA8GA1UEChMITXlTUUwg --QUIwHhcNMTAwMTI5MTE1MDIyWhcNMTUwMTI4MTE1MDIyWjAyMQswCQYDVQQGEwJT --RTEQMA4GA1UECBMHVXBwc2FsYTERMA8GA1UEChMITXlTUUwgQUIwgZ8wDQYJKoZI --hvcNAQEBBQADgY0AMIGJAoGBAMyaN0kTZtzP4wsToSPteNtOvRH2jA12+aMyVpr4 --oSFqVU5NP+ZnnSaZss2kmtIrWVzXitNgaPgYvcW+FeEqPKPUYcv1EZQXgYH3h4z2 --atLu2OZ39mJmTS4WjQiBSsnGSzHluceKhJZIp0eMDSaQVk7mpW6Ms/Kf/D14m0lu --hoN3AgMBAAEwDQYJKoZIhvcNAQEEBQADgYEAXh+jU18kExz4KDKwf2lp8w7ANIcQ --A33aFYu9GbgaVjHnhUmByX9FIHQ+icDgJoRRzAQWzmmZAeEmmbPj9b3sX6CE5Dja --dXh7iZzSzWCVILqO43zm33Y6fIl3ApSGETrEYX1vcYMhihf7F+LuAmthwbRSY9fY --RrLFnG84kYo1Mgs= -+MIIDETCCAfmgAwIBAgIBBjANBgkqhkiG9w0BAQUFADBEMQswCQYDVQQGEwJTRTEQ -+MA4GA1UECBMHVXBwc2FsYTEQMA4GA1UEBxMHVXBwc2FsYTERMA8GA1UEChMITXlT -+UUwgQUIwHhcNMTAwMjIwMDMwMzI2WhcNMzAwOTAzMDMwMzI2WjAyMQswCQYDVQQG -+EwJTRTEQMA4GA1UECBMHVXBwc2FsYTERMA8GA1UEChMITXlTUUwgQUIwgZ8wDQYJ -+KoZIhvcNAQEBBQADgY0AMIGJAoGBAMLnIM+JWS9ny0yf6BHyI+Xxse4/Zl/D9f0e -+Me6PTCq9wEqln8hE1XePFRtNeG6yokilJDMFQAKzwYeNWTwaB6qG8AThnCBLIjLE -+UZ5A5DHDV/WYvy6x/SxWv0nZm+cXzJVftQgZXp3fZSI5LEj7aZYxejVN3mC0wWAZ 
-+X5ZWflUZAgMBAAGjgaMwgaAwCQYDVR0TBAIwADAdBgNVHQ4EFgQUjRBnkTN2nALl -+eF3Yxe8llrLX+h8wdAYDVR0jBG0wa4AUsft+d7VAjWgRftkR5cPG2k2sUbChSKRG -+MEQxCzAJBgNVBAYTAlNFMRAwDgYDVQQIEwdVcHBzYWxhMRAwDgYDVQQHEwdVcHBz -+YWxhMREwDwYDVQQKEwhNeVNRTCBBQoIJAL5YrUwfPSWVMA0GCSqGSIb3DQEBBQUA -+A4IBAQCpiBA+XSpHKcgDJ3oxWo4QA7y1TjcdEnvrX1BxcLGjjpMOdxdsR7bJpE0q -+xDjwYVWyfyi6BnnuZxF91Ml/ChjIwcvQLPljD7tFyt7qu6wAAVJINisHK8hGx7Eh -+gb13OedMOaq9rGDYp7/PFJhKC6FAVQaNbzWpOaBxqZe6fHM8QbrFHBFLK0MdLbp7 -+XxS1PWRiFTa0Fr14yEON+Ryl0qyhWHThmd6tBBlDqL0K/RmbUERGbRhVTb+0W6ST -+YsdkkWxUNNH48/8SbV+F5zWeXEKBXvvIu0RRmLLvG59aIncofdr7CMKUmg9CCJNU -+EB6t8k/8YphR6Zu5OpPZ5B8dxHbQ - -----END CERTIFICATE----- -diff -Naur mysql-5.1.50.orig/mysql-test/std_data/client-key.pem mysql-5.1.50/mysql-test/std_data/client-key.pem ---- mysql-5.1.50.orig/mysql-test/std_data/client-key.pem 2010-08-03 13:55:05.000000000 -0400 -+++ mysql-5.1.50/mysql-test/std_data/client-key.pem 2010-08-27 23:42:05.752428395 -0400 -@@ -1,15 +1,15 @@ - -----BEGIN RSA PRIVATE KEY----- --MIICXQIBAAKBgQDMmjdJE2bcz+MLE6Ej7XjbTr0R9owNdvmjMlaa+KEhalVOTT/m --Z50mmbLNpJrSK1lc14rTYGj4GL3FvhXhKjyj1GHL9RGUF4GB94eM9mrS7tjmd/Zi --Zk0uFo0IgUrJxksx5bnHioSWSKdHjA0mkFZO5qVujLPyn/w9eJtJboaDdwIDAQAB --AoGASqk/4We2En+93y3jkIO4pXafIe3w/3zZ7caRue1ehx4RUQh5d+95djuB9u7J --HEZ7TpjM7QNyao5EueL6gvbxt0LXFvqAMni7yM9tt/HUYtHHPqYiRtUny9bKYFTm --l8szCCMal/wD9GZU9ByHDNHm7tHUMyMhARNTYSgx+SERFmECQQD/6jJocC4SXf6f --T3LqimWR02lbJ7qCoDgRglsUXh0zjrG+IIiAyE+QOCCx1GMe3Uw6bsIuYwdHT6as --WcdPs04xAkEAzKulvEvLVvN5zfa/DTYRTV7jh6aDleOxjsD5oN/oJXoACnPzVuUL --qQQMNtuAXm6Q1QItrRxpQsSKbY0UQka6JwJBAOSgoNoG5lIIYTKIMvzwGV+XBLeo --HYsXgh+6Wo4uql3mLErUG78ZtWL9kc/tE4R+ZdyKGLaCR/1gXmH5bwN4B/ECQEBb --uUH8k3REG4kojesZlVc+/00ojzgS4UKCa/yqa9VdB6ZBz8MDQydinnShkTwgiGpy --xOoqhO753o2UT0qH8wECQQC99IEJWUnwvExVMkLaZH5NjAFJkb22sjkmuT11tAgU --RQgOMoDOm6driojnOnDWOkx1r1Gy9NgMLooduja4v6cx -+MIICWwIBAAKBgQDC5yDPiVkvZ8tMn+gR8iPl8bHuP2Zfw/X9HjHuj0wqvcBKpZ/I -+RNV3jxUbTXhusqJIpSQzBUACs8GHjVk8GgeqhvAE4ZwgSyIyxFGeQOQxw1f1mL8u 
-+sf0sVr9J2ZvnF8yVX7UIGV6d32UiOSxI+2mWMXo1Td5gtMFgGV+WVn5VGQIDAQAB -+AoGARXcXLKDpVooJ3W+IyQyiWsw//IhANpWjUOm4JiyQmxMyO+i4ACr4Yjpu7WI5 -+MEseqAGj20NdwxjKO0PXsCIe5LmrGZ+SI8+CSERFOWXWRtCWz7y7SG30i1k6suvM -+mwqWom0tJLwn93uA1lm/WSwKQwUrJRahRQd3EaZqrl7DP5kCQQD/8gbuYAT5pxQe -+ULLGM0RvEsXxDYbEDxNbY5wrBazfklBwpumxZpFl6jEAT++7Kh2Ns3A7kB1oUNlA -+FPYr+dYPAkEAwvHEwRtoyUr8jqoqVVJWI76CDmBjEOzVeMKW97ztqbs2LxZW8dYI -+iOh/myFGpdoUwgu0U8w9MmXcj3ZeZCYKVwJALyQ+AJPw9qa+fuLwOq9gsHCtwrty -+EhSQxSlwrz/pWniRll439vPkXfgntF4E0t1r+hiN2Hqv3/HcQgBaYzkuIwJAG023 -+bACFxaOuCeFFepvEms8E8jSHy4gQQhCnCl24v8wLw76SQN7kZSCDNtwLRBFuVNtE -+z3PMonFn2eQPRmGZkwJAP1c1BHprMQx/ruafdscROILv3JrH40C1bR6KVVBKt1dK -+Qpnpgi7hK5rUQjDF8k3bn9ugTt06jyeHe/QhAml0kg== - -----END RSA PRIVATE KEY----- -diff -Naur mysql-5.1.50.orig/mysql-test/std_data/server-cert.pem mysql-5.1.50/mysql-test/std_data/server-cert.pem ---- mysql-5.1.50.orig/mysql-test/std_data/server-cert.pem 2010-08-03 13:55:08.000000000 -0400 -+++ mysql-5.1.50/mysql-test/std_data/server-cert.pem 2010-08-27 23:42:05.753428361 -0400 -@@ -1,41 +1,69 @@ - Certificate: - Data: -- Version: 1 (0x0) -- Serial Number: 1048578 (0x100002) -- Signature Algorithm: md5WithRSAEncryption -+ Version: 3 (0x2) -+ Serial Number: 4 (0x4) -+ Signature Algorithm: sha1WithRSAEncryption - Issuer: C=SE, ST=Uppsala, L=Uppsala, O=MySQL AB - Validity -- Not Before: Jan 29 11:56:49 2010 GMT -- Not After : Jan 28 11:56:49 2015 GMT -+ Not Before: Feb 20 02:55:06 2010 GMT -+ Not After : Sep 3 02:55:06 2030 GMT - Subject: C=SE, ST=Uppsala, O=MySQL AB, CN=localhost - Subject Public Key Info: - Public Key Algorithm: rsaEncryption -- Public-Key: (512 bit) -- Modulus: -- 00:cd:e4:87:51:9d:72:11:a0:d1:fa:f3:92:8b:13: -- 1c:eb:f7:e2:9a:2f:72:a8:d6:65:48:d1:69:af:1b: -- c0:4c:13:e5:60:60:51:41:e9:ab:a6:bc:13:bb:0c: -- 5e:32:7c:d9:6c:9e:cd:05:24:84:78:db:80:91:2e: -- d8:88:2b:c2:ed -+ RSA Public Key: (1024 bit) -+ Modulus (1024 bit): -+ 00:e3:7d:4f:c2:23:77:a9:3a:2c:d2:69:59:a0:2f: -+ 
4e:d1:51:4c:ae:8d:f5:17:cc:ce:58:9c:83:4f:0b: -+ a3:bb:29:a2:b8:1d:3e:1b:04:f9:a9:3e:e2:61:d0: -+ e6:7b:b9:7c:12:d8:1f:86:c9:53:b5:04:dd:df:26: -+ e9:c0:2b:de:4a:96:2e:f3:23:6f:79:6d:a9:d2:4e: -+ 17:af:2f:de:8b:68:44:ae:de:a3:e2:c4:37:1c:04: -+ ad:73:4b:85:f9:83:ac:fe:b7:c1:54:47:2e:96:d4: -+ 31:96:85:94:69:d6:5a:63:24:04:99:89:19:1d:56: -+ 8a:d1:77:aa:87:fb:38:cd:b7 - Exponent: 65537 (0x10001) -- Signature Algorithm: md5WithRSAEncryption -- 73:ce:9c:6e:39:46:b4:14:be:da:3f:f3:1b:ba:90:bc:23:43: -- d7:82:2a:70:4e:a6:d9:5a:65:5c:b7:df:71:df:75:77:c5:80: -- a4:af:fa:d2:59:e2:fd:c9:9c:f0:98:95:8e:69:a9:8c:7c:d8: -- 6f:48:d2:e3:36:e0:cd:ff:3f:d1:a5:e6:ab:75:09:c4:50:10: -- c4:96:dd:bf:3b:de:32:46:da:ca:4a:f1:d6:52:8a:33:2f:ab: -- f5:2e:70:3f:d4:9c:be:00:c8:03:f9:39:8a:df:5b:70:3c:40: -- ef:03:be:7c:3d:1d:32:32:f3:51:81:e2:83:30:6e:3d:38:9b: -- fb:3c -+ X509v3 extensions: -+ X509v3 Basic Constraints: -+ CA:FALSE -+ X509v3 Subject Key Identifier: -+ CC:8C:71:40:D0:0F:BF:D1:99:79:3F:1B:E9:10:76:19:67:36:0F:A3 -+ X509v3 Authority Key Identifier: -+ keyid:B1:FB:7E:77:B5:40:8D:68:11:7E:D9:11:E5:C3:C6:DA:4D:AC:51:B0 -+ DirName:/C=SE/ST=Uppsala/L=Uppsala/O=MySQL AB -+ serial:BE:58:AD:4C:1F:3D:25:95 -+ -+ Signature Algorithm: sha1WithRSAEncryption -+ 6f:ad:5e:59:fa:84:3a:be:e2:72:b1:e8:66:2a:4e:f8:73:19: -+ 11:06:11:92:78:56:3e:d6:e8:68:29:90:8b:59:d2:fe:aa:ae: -+ 25:59:c7:e9:99:bb:4a:06:43:dd:40:bd:cb:f4:ae:79:95:7d: -+ 8e:90:ef:58:d2:a8:fc:bf:07:f3:37:b2:9b:bd:da:e6:8c:56: -+ dd:5e:c6:4a:70:7c:3e:3d:a1:e8:35:06:b8:a7:7b:ac:26:85: -+ 54:5d:09:a2:7b:77:b4:17:7f:72:31:cb:ff:cc:67:6d:e6:3e: -+ c6:dc:96:eb:4a:0a:ae:e9:48:ae:8a:e0:d6:73:57:6e:32:4c: -+ 00:dc:28:da:55:b3:9f:9f:d8:98:cc:d9:f1:b6:b3:14:67:2e: -+ a1:47:1e:51:11:cf:70:9f:31:8f:ba:59:29:f2:d0:88:0b:e2: -+ 51:6b:f8:31:ed:6d:ac:00:5e:d3:78:4c:95:97:02:cc:74:2b: -+ 3b:c6:28:e6:2a:c3:30:99:35:b4:4d:31:46:d4:90:f2:47:ed: -+ 64:85:1a:75:2a:72:0a:2f:c6:3a:2f:d2:ac:6b:31:cc:e5:a8: -+ 
07:c2:d6:22:f3:c6:0f:bf:67:d9:d6:b2:79:cd:48:b5:c3:e0: -+ e3:18:7f:b5:74:c9:43:19:fb:c4:93:29:ca:cc:90:2b:1b:6f: -+ 45:f6:25:f9 - -----BEGIN CERTIFICATE----- --MIIBtzCCASACAxAAAjANBgkqhkiG9w0BAQQFADBEMQswCQYDVQQGEwJTRTEQMA4G --A1UECBMHVXBwc2FsYTEQMA4GA1UEBxMHVXBwc2FsYTERMA8GA1UEChMITXlTUUwg --QUIwHhcNMTAwMTI5MTE1NjQ5WhcNMTUwMTI4MTE1NjQ5WjBGMQswCQYDVQQGEwJT --RTEQMA4GA1UECBMHVXBwc2FsYTERMA8GA1UEChMITXlTUUwgQUIxEjAQBgNVBAMT --CWxvY2FsaG9zdDBcMA0GCSqGSIb3DQEBAQUAA0sAMEgCQQDN5IdRnXIRoNH685KL --Exzr9+KaL3Ko1mVI0WmvG8BME+VgYFFB6aumvBO7DF4yfNlsns0FJIR424CRLtiI --K8LtAgMBAAEwDQYJKoZIhvcNAQEEBQADgYEAc86cbjlGtBS+2j/zG7qQvCND14Iq --cE6m2VplXLffcd91d8WApK/60lni/cmc8JiVjmmpjHzYb0jS4zbgzf8/0aXmq3UJ --xFAQxJbdvzveMkbaykrx1lKKMy+r9S5wP9ScvgDIA/k5it9bcDxA7wO+fD0dMjLz --UYHigzBuPTib+zw= -+MIIDJTCCAg2gAwIBAgIBBDANBgkqhkiG9w0BAQUFADBEMQswCQYDVQQGEwJTRTEQ -+MA4GA1UECBMHVXBwc2FsYTEQMA4GA1UEBxMHVXBwc2FsYTERMA8GA1UEChMITXlT -+UUwgQUIwHhcNMTAwMjIwMDI1NTA2WhcNMzAwOTAzMDI1NTA2WjBGMQswCQYDVQQG -+EwJTRTEQMA4GA1UECBMHVXBwc2FsYTERMA8GA1UEChMITXlTUUwgQUIxEjAQBgNV -+BAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA431PwiN3 -+qTos0mlZoC9O0VFMro31F8zOWJyDTwujuymiuB0+GwT5qT7iYdDme7l8EtgfhslT -+tQTd3ybpwCveSpYu8yNveW2p0k4Xry/ei2hErt6j4sQ3HAStc0uF+YOs/rfBVEcu -+ltQxloWUadZaYyQEmYkZHVaK0Xeqh/s4zbcCAwEAAaOBozCBoDAJBgNVHRMEAjAA -+MB0GA1UdDgQWBBTMjHFA0A+/0Zl5PxvpEHYZZzYPozB0BgNVHSMEbTBrgBSx+353 -+tUCNaBF+2RHlw8baTaxRsKFIpEYwRDELMAkGA1UEBhMCU0UxEDAOBgNVBAgTB1Vw -+cHNhbGExEDAOBgNVBAcTB1VwcHNhbGExETAPBgNVBAoTCE15U1FMIEFCggkAvlit -+TB89JZUwDQYJKoZIhvcNAQEFBQADggEBAG+tXln6hDq+4nKx6GYqTvhzGREGEZJ4 -+Vj7W6GgpkItZ0v6qriVZx+mZu0oGQ91Avcv0rnmVfY6Q71jSqPy/B/M3spu92uaM -+Vt1exkpwfD49oeg1Brine6wmhVRdCaJ7d7QXf3Ixy//MZ23mPsbclutKCq7pSK6K -+4NZzV24yTADcKNpVs5+f2JjM2fG2sxRnLqFHHlERz3CfMY+6WSny0IgL4lFr+DHt -+bawAXtN4TJWXAsx0KzvGKOYqwzCZNbRNMUbUkPJH7WSFGnUqcgovxjov0qxrMczl -+qAfC1iLzxg+/Z9nWsnnNSLXD4OMYf7V0yUMZ+8STKcrMkCsbb0X2Jfk= - -----END CERTIFICATE----- -diff -Naur 
mysql-5.1.50.orig/mysql-test/std_data/server-key.pem mysql-5.1.50/mysql-test/std_data/server-key.pem ---- mysql-5.1.50.orig/mysql-test/std_data/server-key.pem 2010-08-03 13:55:08.000000000 -0400 -+++ mysql-5.1.50/mysql-test/std_data/server-key.pem 2010-08-27 23:42:05.754428433 -0400 -@@ -1,9 +1,15 @@ - -----BEGIN RSA PRIVATE KEY----- --MIIBOwIBAAJBAM3kh1GdchGg0frzkosTHOv34povcqjWZUjRaa8bwEwT5WBgUUHp --q6a8E7sMXjJ82WyezQUkhHjbgJEu2Igrwu0CAwEAAQJBAJuwhFbF3NzRpBbEmnqJ --4GPa1UJMQMLFJF+04tqj/HxJcAIVhOJhGmmtYNw1yjz/ZsPnfJCMz4eFOtdjvGtf --peECIQDmFFg2WLvYo+2m9w9V7z4ZIkg7ixYkI/ObUUctfZkPOQIhAOUWnrvjFrAX --bIvYT/YR50+3ZDLEc51XxNgJnWqWYl1VAiEAnTOFWgyivFC1DgF8PvDp8u5TgCt2 --A1d1GMgd490O+TECIC/WMl0/hTxOF9930vKqOGf//o9PUGkZq8QE9fcM4gtlAiAE --iOcFpnLjtWj57jrhuw214ucnB5rklkQQe+AtcARNkg== -+MIICXgIBAAKBgQDjfU/CI3epOizSaVmgL07RUUyujfUXzM5YnINPC6O7KaK4HT4b -+BPmpPuJh0OZ7uXwS2B+GyVO1BN3fJunAK95Kli7zI295banSThevL96LaESu3qPi -+xDccBK1zS4X5g6z+t8FURy6W1DGWhZRp1lpjJASZiRkdVorRd6qH+zjNtwIDAQAB -+AoGAUb0o91y/FjMs/72S0pes/lDz+JRRSGfyjKxQEgrgndNsADOhqRu0iTdrKDJj -+XnlbN3ooecnFJfnFrvTQcJhSmlS30j6VrBw6LXpCBK3dvjYgJ9LOne7WK+dF1+vS -+FMQtsP04C56Sxy6HJDpMyWJ6oS3Bu169ygG2AxKo+Fk+E6ECQQD38w/MzmrARz2Z -+AGeEPDUnVZPYgtmXkmks95S0/2jSoLhmgpvJimzxwpYwVG/BG8dSDVuTDu5kp05D -+3bZIp3EzAkEA6uAwJsCZPtHXlWU3wYZJsA697rUNjPaCQOIaZ/lnh5RUHTmUiw1h -+Oj/VORqKB0kXqcDfawwLjZEvh1Xli+H5bQJBANTmhw2TvEPnp/OFTl1UGUvyBmXl -+TRMB639qAu07VfVtfYi/4ya1zn/0VmOfTOoigQ5qW9Q1AOu6YNCTQl62L9MCQQDc -+YfEsW2kvNYxYJHoVfuBjbuGuOnn1e1Oqd70ZND59S6NFLMMBWlORaVWzWACNZ3rp -+kAzSj6HDeqgjD2jsQONdAkEAt7S1YHUn8F760bRn4AnAto2TVOYdArtTP/wYjd4o -+9rJREO/d8AYkYJ96APLvF0SZ4n3t1pLwQRsKKN8ZGTmzLA== - -----END RSA PRIVATE KEY----- -diff -Naur mysql-5.1.50.orig/mysql-test/std_data/server8k-cert.pem mysql-5.1.50/mysql-test/std_data/server8k-cert.pem ---- mysql-5.1.50.orig/mysql-test/std_data/server8k-cert.pem 2010-08-03 13:55:08.000000000 -0400 -+++ mysql-5.1.50/mysql-test/std_data/server8k-cert.pem 2010-08-27 23:43:00.005366270 -0400 -@@ -1,51 +1,69 @@ 
-+Certificate: -+ Data: -+ Version: 3 (0x2) -+ Serial Number: 5 (0x5) -+ Signature Algorithm: sha1WithRSAEncryption -+ Issuer: C=SE, ST=Uppsala, L=Uppsala, O=MySQL AB -+ Validity -+ Not Before: Feb 20 03:00:54 2010 GMT -+ Not After : Sep 3 03:00:54 2030 GMT -+ Subject: C=SE, ST=Uppsala, O=MySQL AB, CN=server -+ Subject Public Key Info: -+ Public Key Algorithm: rsaEncryption -+ RSA Public Key: (1024 bit) -+ Modulus (1024 bit): -+ 00:c5:da:44:95:06:77:16:21:af:a0:c4:3c:e9:f8: -+ 1d:2d:95:f9:63:90:8c:3f:86:ba:77:76:4a:52:4b: -+ 6b:af:29:f5:1c:aa:d4:3f:3e:42:9f:6d:46:ba:86: -+ 90:b1:2d:cc:db:c6:33:15:a3:f4:af:53:33:4f:a1: -+ 56:d1:aa:3b:26:10:f7:64:b5:f9:bf:1b:b1:47:8e: -+ cc:a6:d6:0d:aa:4a:77:e3:a3:63:9d:2a:dc:65:f4: -+ 7f:91:17:38:2d:d6:cd:4e:8d:53:52:97:6e:87:fc: -+ 64:60:a6:a1:00:ac:96:6c:e4:42:94:75:17:46:6f: -+ 91:b5:dd:06:47:ed:05:e3:db -+ Exponent: 65537 (0x10001) -+ X509v3 extensions: -+ X509v3 Basic Constraints: -+ CA:FALSE -+ X509v3 Subject Key Identifier: -+ 6E:60:3F:29:13:60:99:ED:0C:F7:15:B5:DB:7B:1C:FB:6F:60:19:ED -+ X509v3 Authority Key Identifier: -+ keyid:B1:FB:7E:77:B5:40:8D:68:11:7E:D9:11:E5:C3:C6:DA:4D:AC:51:B0 -+ DirName:/C=SE/ST=Uppsala/L=Uppsala/O=MySQL AB -+ serial:BE:58:AD:4C:1F:3D:25:95 -+ -+ Signature Algorithm: sha1WithRSAEncryption -+ 63:2e:0f:07:14:06:cf:74:90:3d:37:42:f2:48:70:60:21:bc: -+ 34:52:31:f1:87:70:d2:b2:fb:ff:13:38:dc:f0:5e:43:d7:ee: -+ a7:c7:1f:ac:aa:d2:8c:4f:fa:3c:4c:73:f6:b6:c2:0c:a0:ea: -+ a2:c9:e2:73:61:c3:2e:78:40:0f:2a:d3:63:50:9b:b8:f9:89: -+ 40:ed:98:08:97:c3:07:24:17:34:b5:78:89:0a:bb:83:4c:e2: -+ 5c:2e:13:d6:21:30:ad:30:48:b5:70:12:ff:4a:6f:42:f0:f8: -+ 9f:b1:4b:bd:89:2b:f0:9d:e2:49:2b:35:69:18:1f:76:40:b4: -+ 76:bd:cb:dd:27:2f:c0:c1:e2:33:3e:6e:df:68:54:19:92:8a: -+ bb:13:9c:cf:d6:17:56:da:bf:0d:64:70:3a:45:b7:aa:5f:e3: -+ f5:96:ae:34:f2:17:37:27:d0:4b:e8:30:4a:c0:02:42:e2:d2: -+ 30:eb:eb:c7:d7:ec:d8:df:5c:43:58:e2:6f:b7:58:54:0d:c4: -+ 01:71:2d:59:8f:44:c7:a1:6c:0b:41:28:fa:b7:63:a7:68:d3: -+ 
4f:c3:0f:17:9e:b2:32:50:e6:0b:87:3d:e2:39:47:c0:d8:0a: -+ 3b:f6:af:50:68:0f:9d:ef:6e:34:0d:3a:07:94:f8:a4:d7:24: -+ 86:32:d3:b4 - -----BEGIN CERTIFICATE----- --MIIJFDCCBPwCAQEwDQYJKoZIhvcNAQEEBQAwTjELMAkGA1UEBhMCU0UxEDAOBgNV --BAgTB1VwcHNhbGExETAPBgNVBAoTCE15U1FMIEFCMQ0wCwYDVQQLEwRUZXN0MQsw --CQYDVQQDEwJDQTAeFw0xMDA3MjgxNDA3MjhaFw0xODEwMTQxNDA3MjhaMFIxCzAJ --BgNVBAYTAlNFMRAwDgYDVQQIEwdVcHBzYWxhMREwDwYDVQQKEwhNeVNRTCBBQjEN --MAsGA1UECxMEVGVzdDEPMA0GA1UEAxMGc2VydmVyMIIEIjANBgkqhkiG9w0BAQEF --AAOCBA8AMIIECgKCBAEA6h3v1OWb9I9U/Z8diBu/xYGS8NCTD3ZESboHxVI2qSEC --PgxNNcG8Lh0ktQdgYcOe64MnDTZX0Bibm47hoDldrAlTSffFxQhylqBBoXxDF+Lr --hXIqCz7K0PsK+bYusL9ezJ7PETDnCT7oy95q4GXbKsutbNsm9if4ZE41gs2KnoU2 --DA7kvMmkKojrMIL4+BqTXA20LLo0iSbgvUTvpSJw4u96BeyzMNnxK2wP5vvTtUo5 --hACbfU87YjaSKs+q2VXCzfyYGZk1L1xk5GUI0bP+jutf1dDzNttW2/q2Nf5rxx09 --Gh/GwmOnEk1O7cOZ8VQCsOHirIM39NuSARsY6Y3G5XM4k2W4nxyR/RtdG9bvs/33 --aGsZ5V5yp7WSs8s9HHwaCPSsUiLKckQ7uA0TTRgbeweMrrLKovG57jsbBBB8pQD4 --PRd31qgxCdstWXHiWwRyI8vOLWENPXPFqA/rJwwqNdWTogy38aqVXxGYR8PIwjA2 --OaIwFjwGZcsPNLqw6bgAN8O2UBqZHWiMF8mi7brvioDvAIufZuqa2SqT/At45H83 --psQ6R4FsxZt6SAK7EsdPo8OYTrY1i4iPZd/eKhnEu2srEZgsKRwY5H1mvDH5fWCc --HSFu07sWmlmK6Or65Fsa0IaKLJiQDVVETd6xrI0wkM4AOcbKDrS7aywJ426dopbs --+LFdt4N0cdII4gBgJAfLuuA2yrDXRq4P6cgpVMy0R+0dEYE8zzm8zf1a+Ud273LS --9+LB+LJKwqbW8nOPBoiekimIKfJYoOA4+C/mAjsYl1sVjjEhXJAs9S9L2UvnUk1P --sZi4UKHI6eAIEl7VM1sQ4GbdZ0px2dF2Ax7pGkhD+DLpYyYkCprharKZdmuUNLUd --NhXxi/HSEiE+Uy+o8RIzmH7LuROl/ZgnfHjJEiBLt2qPvwrwYd4c3XuXWs4YsWfV --JTt8Mx2ihgVcdGy9//shCSmgJwR1oWrhgC10AEL2fKeRnYUal1i+IxFPp7nb8uwx --UADgR0cY4A3qR/JP489QFIcxBTVs65De+Bq3ecnujk6yeGpD9iptonq4Y8uNZMc1 --kOE7GiFGwR4EufT5SEMh+tUkjth2r+842vmZZuxrVQaohDiATmIJA07W51zKH+nQ --uw4qVKnAhPaDLCLc7YMIH9JcmkeQX0nf8/S2O2WYDH8glVDi5hfW08tCmV647vRY --nTIywUTO0lFpz7M+VyMNaJ6yXU6biBV5hLAI8C5ldr/SWI789W2+ebBaJ9gfK+PT --trohFSK37GcoSH4V6qSLJHCBASEsiddqHIHMLJZRYD+B6J3tLhjVUM43u+MEGbFT --d33ZDke/WzLTExWkaOv36e67gDBmgDuj9yroq3wGfwIDAQABMA0GCSqGSIb3DQEB 
--BAUAA4IEAQCc9RBhRbuWlmRZPZkqIdi5/+enyjoMmOa6ryJPxFSP8D2jrlHgQsk1 --+GsJmPFT3rwWfoGAQu/aeSX4sp8OhKVJtqNA6MJrGYnZIMolgYa1wZPbkjJsdEfi --UsZdIB0n2+KA0xwEdGPdkGCfNPBtOg557DkcyEvsIZ9ELp4Pp2XzWRhyFGasJZc4 --YwgD/3K2rpOPZoMkBKeKqV19j41OfLKGBVyuaqzitbu9+KT4RU1ibr2a+UuFCwdT --oqyN7bfWXjcjXOMkxCsOmLfKmqQxs7TEOVrYPTdYjamDxLy/e5g5FgoCxGY8iil0 --+YFLZyH6eEx/Os9DlG/M3O1MeRD9U97CdsphbDVZIDyWw5xeX8qQHJe0KSprAgiG --TLhTZHeyrKujQCQS1oFFmNy4gSqXt0j1/6/9T80j6HeyjiiYEaEQK9YLTAjRoA7W --VN8wtHI5F3RlNOVQEJks/bjdlpLL3VhaWtfewGh/mXRGcow84cgcsejMexmhreHm --JfTUl9+X1IFFxGq2/606A9ROQ7kN/s4rXu7/TiMODXI/kZijoWd2SCc7Z0YWoNo7 --IRKkmZtrsflJbObEuK2Jk59uqzSxyQOBId8qtbPo8qJJyHGV5GCp34g4x67BxJBo --h1iyVMamBAS5Ip1ejghuROrB8Hit8NhAZApXju62btJeXLX+mQayXb/wC/IXNJJD --83tXiLfZgs6GzLAq7+KW/64sZSvj87CPiNtxkvjchAvyr+fhbBXCrf4rlOjJE6SH --Je2/Jon7uqijncARGLBeYUT0Aa6k1slpXuSKxDNt7EIkP21kDZ5/OJ0Y1u587KVB --dEhuDgNf2/8ij7gAQBwBoZMe1DrwddrxgLLBlyHpAZetNYFZNT+Cs/OlpqI0Jm59 --kK9pX0BY4AGOd23XM3K/uLawdmf67kkftim7aVaqXFHPiWsJVtlzmidKvNSmbmZe --dOmMXp6PBoqcdusFVUS7vjd3KAes5wUX/CaTyOOPRu0LMSnpwEnaL76IC9x4Jd6d --7QqY/OFTjpPH8nP57LwouiT6MgSUCWGaOkPuBJ9w9sENSbbINpgJJ42iAe2kE+R7 --qEIvf/2ETCTseeQUqm2nWiSPLkNagEh6kojmEoKrGyrv3YjrSXSOY1a70tDVy43+ --ueQDQzNZm3Q7inpke2ZKvWyY0LQmLzP2te+tnNBcdLyKJx7emPRTuMUlEdK7cLbt --V3Sy9IKtyAXqqd66fPFj4NhJygyncj8M6CSqhG5L0GhDbkA8UJ8yK/gfKm3h5xe2 --utULK5VMtAhQt6cVahO59A9t/OI17y45bmlIgdlEQISzVFe9ZbIUJW44zBfPx74k --/w8pMRr8gEuRqpL2WdJiKGG6lhMHLVFo -+MIIDIjCCAgqgAwIBAgIBBTANBgkqhkiG9w0BAQUFADBEMQswCQYDVQQGEwJTRTEQ -+MA4GA1UECBMHVXBwc2FsYTEQMA4GA1UEBxMHVXBwc2FsYTERMA8GA1UEChMITXlT -+UUwgQUIwHhcNMTAwMjIwMDMwMDU0WhcNMzAwOTAzMDMwMDU0WjBDMQswCQYDVQQG -+EwJTRTEQMA4GA1UECBMHVXBwc2FsYTERMA8GA1UEChMITXlTUUwgQUIxDzANBgNV -+BAMTBnNlcnZlcjCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAxdpElQZ3FiGv -+oMQ86fgdLZX5Y5CMP4a6d3ZKUktrryn1HKrUPz5Cn21GuoaQsS3M28YzFaP0r1Mz -+T6FW0ao7JhD3ZLX5vxuxR47MptYNqkp346NjnSrcZfR/kRc4LdbNTo1TUpduh/xk -+YKahAKyWbORClHUXRm+Rtd0GR+0F49sCAwEAAaOBozCBoDAJBgNVHRMEAjAAMB0G 
-+A1UdDgQWBBRuYD8pE2CZ7Qz3FbXbexz7b2AZ7TB0BgNVHSMEbTBrgBSx+353tUCN -+aBF+2RHlw8baTaxRsKFIpEYwRDELMAkGA1UEBhMCU0UxEDAOBgNVBAgTB1VwcHNh -+bGExEDAOBgNVBAcTB1VwcHNhbGExETAPBgNVBAoTCE15U1FMIEFCggkAvlitTB89 -+JZUwDQYJKoZIhvcNAQEFBQADggEBAGMuDwcUBs90kD03QvJIcGAhvDRSMfGHcNKy -++/8TONzwXkPX7qfHH6yq0oxP+jxMc/a2wgyg6qLJ4nNhwy54QA8q02NQm7j5iUDt -+mAiXwwckFzS1eIkKu4NM4lwuE9YhMK0wSLVwEv9Kb0Lw+J+xS72JK/Cd4kkrNWkY -+H3ZAtHa9y90nL8DB4jM+bt9oVBmSirsTnM/WF1bavw1kcDpFt6pf4/WWrjTyFzcn -+0EvoMErAAkLi0jDr68fX7NjfXENY4m+3WFQNxAFxLVmPRMehbAtBKPq3Y6do00/D -+DxeesjJQ5guHPeI5R8DYCjv2r1BoD53vbjQNOgeU+KTXJIYy07Q= - -----END CERTIFICATE----- -diff -Naur mysql-5.1.50.orig/mysql-test/std_data/server8k-key.pem mysql-5.1.50/mysql-test/std_data/server8k-key.pem ---- mysql-5.1.50.orig/mysql-test/std_data/server8k-key.pem 2010-08-03 13:55:08.000000000 -0400 -+++ mysql-5.1.50/mysql-test/std_data/server8k-key.pem 2010-08-27 23:43:10.165365998 -0400 -@@ -1,99 +1,15 @@ - -----BEGIN RSA PRIVATE KEY----- --MIISKQIBAAKCBAEA6h3v1OWb9I9U/Z8diBu/xYGS8NCTD3ZESboHxVI2qSECPgxN --NcG8Lh0ktQdgYcOe64MnDTZX0Bibm47hoDldrAlTSffFxQhylqBBoXxDF+LrhXIq --Cz7K0PsK+bYusL9ezJ7PETDnCT7oy95q4GXbKsutbNsm9if4ZE41gs2KnoU2DA7k --vMmkKojrMIL4+BqTXA20LLo0iSbgvUTvpSJw4u96BeyzMNnxK2wP5vvTtUo5hACb --fU87YjaSKs+q2VXCzfyYGZk1L1xk5GUI0bP+jutf1dDzNttW2/q2Nf5rxx09Gh/G --wmOnEk1O7cOZ8VQCsOHirIM39NuSARsY6Y3G5XM4k2W4nxyR/RtdG9bvs/33aGsZ --5V5yp7WSs8s9HHwaCPSsUiLKckQ7uA0TTRgbeweMrrLKovG57jsbBBB8pQD4PRd3 --1qgxCdstWXHiWwRyI8vOLWENPXPFqA/rJwwqNdWTogy38aqVXxGYR8PIwjA2OaIw --FjwGZcsPNLqw6bgAN8O2UBqZHWiMF8mi7brvioDvAIufZuqa2SqT/At45H83psQ6 --R4FsxZt6SAK7EsdPo8OYTrY1i4iPZd/eKhnEu2srEZgsKRwY5H1mvDH5fWCcHSFu --07sWmlmK6Or65Fsa0IaKLJiQDVVETd6xrI0wkM4AOcbKDrS7aywJ426dopbs+LFd --t4N0cdII4gBgJAfLuuA2yrDXRq4P6cgpVMy0R+0dEYE8zzm8zf1a+Ud273LS9+LB --+LJKwqbW8nOPBoiekimIKfJYoOA4+C/mAjsYl1sVjjEhXJAs9S9L2UvnUk1PsZi4 --UKHI6eAIEl7VM1sQ4GbdZ0px2dF2Ax7pGkhD+DLpYyYkCprharKZdmuUNLUdNhXx --i/HSEiE+Uy+o8RIzmH7LuROl/ZgnfHjJEiBLt2qPvwrwYd4c3XuXWs4YsWfVJTt8 
--Mx2ihgVcdGy9//shCSmgJwR1oWrhgC10AEL2fKeRnYUal1i+IxFPp7nb8uwxUADg --R0cY4A3qR/JP489QFIcxBTVs65De+Bq3ecnujk6yeGpD9iptonq4Y8uNZMc1kOE7 --GiFGwR4EufT5SEMh+tUkjth2r+842vmZZuxrVQaohDiATmIJA07W51zKH+nQuw4q --VKnAhPaDLCLc7YMIH9JcmkeQX0nf8/S2O2WYDH8glVDi5hfW08tCmV647vRYnTIy --wUTO0lFpz7M+VyMNaJ6yXU6biBV5hLAI8C5ldr/SWI789W2+ebBaJ9gfK+PTtroh --FSK37GcoSH4V6qSLJHCBASEsiddqHIHMLJZRYD+B6J3tLhjVUM43u+MEGbFTd33Z --Dke/WzLTExWkaOv36e67gDBmgDuj9yroq3wGfwIDAQABAoIEAQCSt6YoZqigz/50 --XvYT6Uf6T6S1lBDFXNmY1qOuDkLBJTWRiwYMDViQEaWCaZgGTKDYeT3M8uR/Phyu --lRFi5vCEMufmcAeZ3hxptw7KU+R8ILJ207/zgit6YglTys9h5txTIack39+6FJmx --wbZ64HpETJZnpMO6+fuZaMXyLjuT8mmXjvHcOgXOvjWeFkZOveDhjJkAesUXuqyX --EI+ajoXuQiPXeKonkD2qd7NTjzfy4gw/ZF4NXs0ZVJeviqtIPo2xp33udOw2vRFh --bMvlF4cNLAbIKYVyOG0ruOfd2I7Unsc/CvD1u5vlRVuUd8OO0JZLIZR7hlRX+A58 --8O1g2H/wJZAsF1BnLnFzDGYCX2WjCCK3Zn85FkKGRa0lTdYDduad/C/N3Y2/pHFE --e7U/2D7IkEei59tD2HcsDBB3MJnckkn/hyiL9qWcxqWZ61vurE+XjU6tc6fnfhk9 --pJQ6yU3epPU7Vfsk0UGA7bbgKpsyzyH8Zl76YC2mN2ZVJjZekfhY+ibT9odEPdOl --yLB5iXA6/WhKkDWaOqZGOH+7MblWgT9wHINlcn+nKzOr00JHl26ac6aMlXXi9vbe --4jgJbFK1HYlFIndyX/BdqRTsFemDoDrVqrEYsaONoVYDd9c5qrqYOeh34DhOksQW --hNwWBfmMlfzgOGtCYhMeK+AajqTtUbMYQA6qp47KJd/Oa5Dvi3ZCpvZh3Ll5iIau --rqCtmojsWCqmpWSu7P+Wu4+O3XkUMPdQUuQ5rJFESEBB3yEJcxqk/RItTcKNElNC --PASrPrMD9cli7S/pJ+frbhu1Gna1ArXzXQE9pMozPaBpjCig7+15R0lL3pmOKO6e --WK3dgSwrnW6TQdLPlSD4lbRoiIdTHVBczztDeUqVvFiV3/cuaEi1nvaVdAYLqjuL --ogK4HwE/FQ54S0ijAsP52n25usoH6OTU3bSd/7NTp0vZCy3yf10x7HUdsh2DvhRO --3+TSK5t0yz0Nt7hNwcI6pLmWUIYcZgpFc/WsiiGscTfhy8rh3kRHI8ylGq53KNF+ --yCVmjqnBRWs91ArxmeF1ctX2t3w5p7gf65hJWqoX/2DiSi5FBsr6HLxa5sUi4wRZ --136aCNt5Wu7w+AzPDbQW6qKUGSyfHJAw4JZasZcaZLise5IWb1ks0DtFbWWdT3ux --8r2AM7IO1WopnekrYCnx/aBvBAv4NjWozVA517ztVttPERt3AGb4nm387nYt5R2U --NO2GBWcDyT8JQLKmffE1AkWolCR1GsvcNLQfLCbnNppgsnsLE/viTG4mq1wjnd8O --2Q8nH1SVTuyGFREMp/zsiAEaGfdd0hI2r1J7OdNPBBCtmhITsy9ZYHqm5vrGvy3s --vi2GuB2RAoICAQD/oWUsg4eTJxHifTJLz/tVSTXnw7DhfbFVa1K1rUV63/MRQAFW 
--pabN4T6Yfp3CpdRkljCA8KPJZj7euwhm4OEg1ulpOouA+cfWlE9RFE8wyOK5SYwM --k+nk31P9MUC866pZg/ghzBGDub91OW1+ZGEtqnLI/n/LhiAIWt0hJvgZclTc1cAL --xffHVlFwoSyNl/nc3ueZCC95nOLst2XcuxZLLbOFtZCmDYsp49q/Jn6EFjn4Ge2o --qp38z6eZgDMP1F4lb9nDqXPHfUSt2jxKlmpfXS+IPKdba67+EjhbtmUYzaR4EoPI --zh+o6SrVWT6Yve7KGiYv06fuRz1m/lLQO/Arbd9ntSjgn+ZEXGOkbhnHUX3DJ4ny --/6XEGB9NLQjern4uNTn0AaV+uvhncapFMaIBnVfq0Cw8eog0136PBYRaVX7T44j5 --HwIyGXWtYGA/SzDEQoksD0Y/T61BEGnLZaKeavNd82WwFvcYHZtE0J4aQGjCEE7N --+nijzCy+j5ETmme9KJvQHpEyXP3N4RBko1eWvyTwFZDdIXtoa6TTEI51lm+FXJ/b --Y+BzMr6KRo29FB+7//1ptUoMvn5hzL0PwOv2ZSTQuoG5hLDEbxWXLNhd1VHcfznF --3EZHwfD2F8aGQ3kz+fkMTNfK955KorDrmLgvmV9eZZ5yQxGZrs5H5YfKpwKCAgEA --6nSUbzfSdVFUH89NM5FmEJgkD06vqCgHl2mpyF+VmDGcay4K06eA4QbRO5kns13+ --n6PcBl/YVW/rNE8iFi+WxfqUpAjdR1HlShvTuTRVqtFTfuN8XhbYU6VMjKyuE0kd --LKe3KRdwubjVNhXRZLBknU+3Y/4hnIR7mcE3/M5Zv5hjb7XnwWg/SzxV9WojCKiu --vQ7cXhH5/o7EuKcl1d6vueGhWsRylCG9RimwgViR2H7zD9kpkOc0nNym9cSpb0Gv --Lui4cf/fVwIt2HfNEGBjbM/83e2MH6b8Xp1fFAy0aXCdRtOo4LVOzJVAxn5dERMX --4JJ4d5cSFbssDN1bITOKzuytfBqRIQGNkOfizgQNWUiaFI0MhEN/icymjm1ybOIh --Gc9tzqKI4wP2X9g+u3+Oof1QaBcZ4UbZEU9ITN87Pa6XVJmpNx7A81BafWoEPFeE --ahoO4XDwlHZazDuSlOseEShxXcVwaIiqySy7OBEPBVuYdEd2Qw/z3JTx9Kw8MKnf --hu+ar5tz5dPnJIsvLeYCcJDe/K6loiZuHTtPbWEy9p6It7qubQNPBvTSBN5eVDKc --Q2bTQNCx8SAAA9C5gJiwWoQKsXJzbRFRY77P9JjuGpua3YJ2nYBHEJmF+fp1R33c --uHIyMphPMkKC4GC3/43kkMr6tck8kZbXGSYsLsBr2GkCggIBAJvvrjILQianzKcm --zAmnI6AQ+ssYesvyyrxaraeZvSqJdlLtgmOCxVANuQt5IW9djUSWwZvGL4Np1aw0 --15k6UNqhftzsE7FnrVneOsww4WXXBUcV8FKz4Bf3i9qFswILmGzmrfSf8YczRfGS --SJKzVPxwX3jwlrBmbx/pnb7dcLbFIbNcyLvl1ZJJu4BDMVRmgssTRp/5eExtQZg4 --//A4SA8wH7TO3yAMXvn8vrGgH8kfbdlEp88d1SYk3g4rP/rGB3A63NIYikIEzmJn --ICQ3wUfPJnGq3kRMWgEuyCZaCy2oNE3yrWVPJ8z3/2MJ/79ZDVNHxEeki2o1FuW+ --+nGAPq+fZIp03iy4HdVRro7dgugtc9QaSHJtNId8V4vSjviX5Oz3FxUb9AJst58S --nVV8Q2FMxBa/SlzSOkhRtCg2q1gXkzhaMnIVUleRZFGQ2uWBToxKMjcoUifIyN1J --z999bkfI4hBLq5pRSAXz+YVu5SMKa10GaawIwJLat+i+1zboF6QyI2o/Wz8nrsNq 
--KX/ajFGu5C94WFgsVoWKNI90KBLe48Ssje9c68waBlV/WHMg1YLvU3yqVDOV+K5c --IHB9tPMnG+AgBYZPxSzuvnLrrkj/GeKx0WI7TrvzOLRGKJo6irMEJ8IzFegASRUq --TVZKYQDYRG7m+lKlSxU+pyMAh2c9AoICAE4kavCip1eIssQjYLTGSkFPo/0iGbOv --G9CgXAE3snFWX67tWphupKrbjdMSWcQTmPD2OTg6q6zWL4twsIi6dcMooHAHsFC7 --//LyUV/SDJdxSyXohiQJ8zH1zwy35RDydnHSuF5OvLh53T44iWDI1dAEqLgAFI3J --LjTxzEpLMGiGTuYFt+ejai0WQAQayvBw4ESM9m+4CB2K0hBFTXv5y5HlnNTW0uWC --VUZUUMrbjUieDz8B/zOXi9aYSGFzmZFGUDAPSqJcSMEELemPDF7f8WNr8vi42tIV --4tlaFD1nep4F9bWMiCXU6B2RxVQi+7vcJEIqL1KUnGd3ydfD00K+ng4Xnj7Vz/cz --QE7CqrpFaXmPlCMzW6+dm51/AyhHXDLkL2od05hiXcNkJ7KMLWRqwExHVIxM3shR --x7lYNl3ArUsCrNd6m4aOjnrKFk7kjeLavHxskPccoGKrC9o0JMfTkWLgmuBJFQ0S --N/HzIbcvIFWF0Ms4ojb50yp6ziXhXfJOO/0KUQEki71XIhvw89mVZszDzD5lqzjf --HCZMBU4MbmL6NdEevFIDH0zPPkx3HPNtJt3kIJbit9wI8VhUMe+ldGnGxpWb8tKw --SfM3vrHkYr+lizk26XfXMFhdAuVtT7dzQKSNEyP/1a2Hs307Xzgiv8JulJ8QIkrX --/nsYWPOAGLG5AoICABmdW9Ppkvuhb1AEcjTWb+XCyopoBc6vit/uQWD9uO+CeX7a --cfzq+iH01CAjyVMc4E1JDc5Lpi106U+GRGcAAaPJB2Sp5NznoxaOVrb71blu4Q4x --bNjtKM/P/DXpO+yJYoOPdKtaSDhtnfNDM7H/jztJ3XIrOltKA7CcRDohbBWIx8Q0 --0uEpvfFpZZBco3yVmjP0RLgIVYn/ZDj9wGhSvFWIJ5vv6GXmtDrcHGMLxcfv7t76 --UVcMW/Yy4mYJRCzGOrWagyVijJ6MTVNciqadWcH1KcbB3EGoMFYMn61or2qJABPM --xz89IlhnROU1Re3X/QRx5t86cw6oa+FqrWMOhSs31I0dNWSuS/xDympG27YIYSDd --mv5seT78GjFmMJC5pPOLoXsbTPB0HpsX2/UL/w/eRAfilTOef/Cf9VE5MP/C2YR7 --NBxUU7/+21D6WvdtBTcZbrXWGroAo8zPP+PwX0+c6WoAvqDJvCPndp8xZhSgEJN/ --0kScptezi8n3ZHI95EA9U5mAHxHz0IhDDVzWw/z1f1SBPxKVX3+By3zaa3lrD2ch --cHq7nBkX72veEevnHUY8Z2rHE2G2jdmRfOtwm4sjL0VBV9fRRoxzJWRduKyeOtDL --EhhBhUoTrT48UnfW9hxnbNLB9P/hh+UJu9HrS2uAwHoGE1+8gcyundupGDBn -+MIICXgIBAAKBgQDF2kSVBncWIa+gxDzp+B0tlfljkIw/hrp3dkpSS2uvKfUcqtQ/ -+PkKfbUa6hpCxLczbxjMVo/SvUzNPoVbRqjsmEPdktfm/G7FHjsym1g2qSnfjo2Od -+Ktxl9H+RFzgt1s1OjVNSl26H/GRgpqEArJZs5EKUdRdGb5G13QZH7QXj2wIDAQAB -+AoGBAJLCjh7Q9eLnx+QDzH9s+Q/IcH4nSbERmh1lFEopAc6j29qQ6PGkmDy0DUPs -+70VOCOh5A4mo3aZzm9sUfVb24/nRtmyTP/AtMuIVGCsUqzI28dJRGvRlY0aSQG/C 
-+ILqMP69kiMNGBvuyEIiJhisOmMvDFEp7HrrXHJM9qcc217DpAkEA4nzJ9yyy2e4O -+r6/D711hdfcU/F+ktXw+pL77kSSdTABUap92Uv2RL36UA4q5h8RNvq/GrzMNm6Ye -+u2IMvBCiTQJBAN+iRbiMJCSitTg5YVMluVbT87co7jbTqk7LN1ujyIFEklm4xlHG -+DLJNgEoDR7QJtAkL++FyogC4zsQsey5voscCQQCp54trTbDuI9QIoAaQrrDKWgz4 -+NpfNPeOQm2UFQT5vIWAyjGWrZGViB8bp0UvVOcJI5nxaOiZfOYOcdrWu75uRAkAn -+67zMc9/j1lPJRJz2Dc7nDBD+ikTz7pcBV897AWLCiK4jbBOi91q+3YzgKXO8VNsZ -+nlUJasA2psbqSBJ5OJ5zAkEA2UxoMju54hASjT54Z92IzraVw4Vo8CYwOcw5fr7z -++m5xg1mmWdLBclmZ+WjARzDuTHIW6u/WCxNGg42AykWzfw== - -----END RSA PRIVATE KEY----- diff --git a/packaging/rpm-uln/mysql-install-test.patch b/packaging/rpm-uln/mysql-install-test.patch deleted file mode 100644 index 5980aea6a9f..00000000000 --- a/packaging/rpm-uln/mysql-install-test.patch +++ /dev/null @@ -1,33 +0,0 @@ -Improve the documentation that will be installed in the mysql-test RPM. - - -diff -Naur mysql-5.1.43.orig/mysql-test/README mysql-5.1.43/mysql-test/README ---- mysql-5.1.43.orig/mysql-test/README 2010-01-15 12:14:43.000000000 -0500 -+++ mysql-5.1.43/mysql-test/README 2010-02-13 21:18:06.000000000 -0500 -@@ -6,6 +6,16 @@ - actually have a co-existing MySQL installation. The tests will not - conflict with it. - -+For use in Red Hat distributions, you should run the script as user mysql, -+so the best bet is something like -+ cd /usr/share/mysql-test -+ sudo -u mysql ./mysql-test-run -+This will use the installed mysql executables, but will run a private copy -+of the server process (using data files within /usr/share/mysql-test), -+so you need not start the mysqld service beforehand. -+To clean up afterwards, remove the created "var" subdirectory, eg -+ sudo -u mysql rm -rf /usr/share/mysql-test/var -+ - All tests must pass. If one or more of them fail on your system, please - read the following manual section for instructions on how to report the - problem: -@@ -25,7 +35,8 @@ - - With no test cases named on the command line, mysql-test-run falls back - to the normal "non-extern" behavior. 
The reason for this is that some --tests cannot run with an external server. -+tests cannot run with an external server (because they need to control the -+options with which the server is started). - - - You can create your own test cases. To create a test case, create a new diff --git a/packaging/rpm-uln/mysql-strmov.patch b/packaging/rpm-uln/mysql-strmov.patch deleted file mode 100644 index a144d0936e4..00000000000 --- a/packaging/rpm-uln/mysql-strmov.patch +++ /dev/null @@ -1,32 +0,0 @@ -Remove overly optimistic definition of strmov() as stpcpy(). - -mysql uses this macro with overlapping source and destination strings, -which is verboten per spec, and fails on some Red Hat platforms. -Deleting the definition is sufficient to make it fall back to a -byte-at-a-time copy loop, which should consistently give the -expected behavior. - -Note: the particular case that prompted this patch is reported and fixed -at http://bugs.mysql.com/bug.php?id=48864. However, my faith in upstream's -ability to detect this type of error is low, and I also see little evidence -of any real performance gain from optimizing these calls. So I'm keeping -this patch. 
- - -diff -Naur mysql-5.1.37.orig/include/m_string.h mysql-5.1.37/include/m_string.h ---- mysql-5.1.37.orig/include/m_string.h 2009-07-13 19:08:50.000000000 -0400 -+++ mysql-5.1.37/include/m_string.h 2009-08-31 21:49:49.000000000 -0400 -@@ -81,13 +81,6 @@ - extern void *(*my_str_malloc)(size_t); - extern void (*my_str_free)(void *); - --#if defined(HAVE_STPCPY) --#define strmov(A,B) stpcpy((A),(B)) --#ifndef stpcpy --extern char *stpcpy(char *, const char *); /* For AIX with gcc 2.95.3 */ --#endif --#endif -- - /* Declared in int2str() */ - extern char NEAR _dig_vec_upper[]; - extern char NEAR _dig_vec_lower[]; diff --git a/packaging/rpm-uln/mysql.init b/packaging/rpm-uln/mysql.init deleted file mode 100644 index 310e8cfa023..00000000000 --- a/packaging/rpm-uln/mysql.init +++ /dev/null @@ -1,209 +0,0 @@ -#!/bin/sh -# -# mysqld This shell script takes care of starting and stopping -# the MySQL subsystem (mysqld). -# -# chkconfig: - 64 36 -# description: MySQL database server. -# processname: mysqld -# config: /etc/my.cnf -# pidfile: /var/run/mysqld/mysqld.pid - -# Source function library. -. /etc/rc.d/init.d/functions - -# Source networking configuration. -. /etc/sysconfig/network - - -exec="/usr/bin/mysqld_safe" -prog="mysqld" - -# Set timeouts here so they can be overridden from /etc/sysconfig/mysqld -STARTTIMEOUT=120 -STOPTIMEOUT=60 - -[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog - -lockfile=/var/lock/subsys/$prog - - -# extract value of a MySQL option from config files -# Usage: get_mysql_option SECTION VARNAME DEFAULT -# result is returned in $result -# We use my_print_defaults which prints all options from multiple files, -# with the more specific ones later; hence take the last match. 
-get_mysql_option(){ - result=`/usr/bin/my_print_defaults "$1" | sed -n "s/^--$2=//p" | tail -n 1` - if [ -z "$result" ]; then - # not found, use default - result="$3" - fi -} - -get_mysql_option mysqld datadir "/var/lib/mysql" -datadir="$result" -get_mysql_option mysqld socket "$datadir/mysql.sock" -socketfile="$result" -get_mysql_option mysqld_safe log-error "/var/log/mysqld.log" -errlogfile="$result" -get_mysql_option mysqld_safe pid-file "/var/run/mysqld/mysqld.pid" -mypidfile="$result" - - -start(){ - [ -x $exec ] || exit 5 - # check to see if it's already running - RESPONSE=`/usr/bin/mysqladmin --socket="$socketfile" --user=UNKNOWN_MYSQL_USER ping 2>&1` - if [ $? = 0 ]; then - # already running, do nothing - action $"Starting $prog: " /bin/true - ret=0 - elif echo "$RESPONSE" | grep -q "Access denied for user" - then - # already running, do nothing - action $"Starting $prog: " /bin/true - ret=0 - else - # prepare for start - touch "$errlogfile" - chown mysql:mysql "$errlogfile" - chmod 0640 "$errlogfile" - [ -x /sbin/restorecon ] && /sbin/restorecon "$errlogfile" - if [ ! -d "$datadir/mysql" ] ; then - # First, make sure $datadir is there with correct permissions - if [ ! -e "$datadir" -a ! -h "$datadir" ] - then - mkdir -p "$datadir" || exit 1 - fi - chown mysql:mysql "$datadir" - chmod 0755 "$datadir" - [ -x /sbin/restorecon ] && /sbin/restorecon "$datadir" - # Now create the database - action $"Initializing MySQL database: " /usr/bin/mysql_install_db --datadir="$datadir" --user=mysql - ret=$? - chown -R mysql:mysql "$datadir" - if [ $ret -ne 0 ] ; then - return $ret - fi - fi - chown mysql:mysql "$datadir" - chmod 0755 "$datadir" - # Pass all the options determined above, to ensure consistent behavior. - # In many cases mysqld_safe would arrive at the same conclusions anyway - # but we need to be sure. 
(An exception is that we don't force the - # log-error setting, since this script doesn't really depend on that, - # and some users might prefer to configure logging to syslog.) - # Note: set --basedir to prevent probes that might trigger SELinux - # alarms, per bug #547485 - $exec --datadir="$datadir" --socket="$socketfile" \ - --pid-file="$mypidfile" \ - --basedir=/usr --user=mysql >/dev/null 2>&1 & - safe_pid=$! - # Spin for a maximum of N seconds waiting for the server to come up; - # exit the loop immediately if mysqld_safe process disappears. - # Rather than assuming we know a valid username, accept an "access - # denied" response as meaning the server is functioning. - ret=0 - TIMEOUT="$STARTTIMEOUT" - while [ $TIMEOUT -gt 0 ]; do - RESPONSE=`/usr/bin/mysqladmin --socket="$socketfile" --user=UNKNOWN_MYSQL_USER ping 2>&1` && break - echo "$RESPONSE" | grep -q "Access denied for user" && break - if ! /bin/kill -0 $safe_pid 2>/dev/null; then - echo "MySQL Daemon failed to start." - ret=1 - break - fi - sleep 1 - let TIMEOUT=${TIMEOUT}-1 - done - if [ $TIMEOUT -eq 0 ]; then - echo "Timeout error occurred trying to start MySQL Daemon." - ret=1 - fi - if [ $ret -eq 0 ]; then - action $"Starting $prog: " /bin/true - touch $lockfile - else - action $"Starting $prog: " /bin/false - fi - fi - return $ret -} - -stop(){ - if [ ! -f "$mypidfile" ]; then - # not running; per LSB standards this is "ok" - action $"Stopping $prog: " /bin/true - return 0 - fi - MYSQLPID=`cat "$mypidfile"` - if [ -n "$MYSQLPID" ]; then - /bin/kill "$MYSQLPID" >/dev/null 2>&1 - ret=$? - if [ $ret -eq 0 ]; then - TIMEOUT="$STOPTIMEOUT" - while [ $TIMEOUT -gt 0 ]; do - /bin/kill -0 "$MYSQLPID" >/dev/null 2>&1 || break - sleep 1 - let TIMEOUT=${TIMEOUT}-1 - done - if [ $TIMEOUT -eq 0 ]; then - echo "Timeout error occurred trying to stop MySQL Daemon." 
- ret=1 - action $"Stopping $prog: " /bin/false - else - rm -f $lockfile - rm -f "$socketfile" - action $"Stopping $prog: " /bin/true - fi - else - action $"Stopping $prog: " /bin/false - fi - else - # failed to read pidfile, probably insufficient permissions - action $"Stopping $prog: " /bin/false - ret=4 - fi - return $ret -} - -restart(){ - stop - start -} - -condrestart(){ - [ -e $lockfile ] && restart || : -} - - -# See how we were called. -case "$1" in - start) - start - ;; - stop) - stop - ;; - status) - status -p "$mypidfile" $prog - ;; - restart) - restart - ;; - condrestart|try-restart) - condrestart - ;; - reload) - exit 3 - ;; - force-reload) - restart - ;; - *) - echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}" - exit 2 -esac - -exit $? diff --git a/packaging/rpm-uln/mysql.spec.sh b/packaging/rpm-uln/mysql.spec.sh deleted file mode 100644 index 34aed51048f..00000000000 --- a/packaging/rpm-uln/mysql.spec.sh +++ /dev/null @@ -1,1991 +0,0 @@ -# -# This file was modified by Oracle in 2011 and later. -# Details of the modifications are described in the "changelog" section. -# -# Modifications copyright (c) 2011, 2012, Oracle and/or its -# affiliates. All rights reserved. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; see the file COPYING. If not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston -# MA 02110-1301 USA. 
- -############################################################################## -# Some common macro definitions -############################################################################## - -# NOTE: "vendor" is used in upgrade/downgrade check, so you can't -# change these, has to be exactly as is. -# %define mysql_old_vendor MySQL AB # Applies to traditional MySQL RPMs only. -# %define mysql_vendor_2 Sun Microsystems, Inc. -%define mysql_vendor Oracle and/or its affiliates - -%define mysql_version @VERSION@ - -%define mysqldatadir /var/lib/mysql - -%define release 1 - -############################################################################## -# Command line handling -############################################################################## -# -# To set options: -# -# $ rpmbuild --define="option <x>" ... -# - -# ---------------------------------------------------------------------------- -# Commercial builds -# ---------------------------------------------------------------------------- -%if %{undefined commercial} -%define commercial 0 -%endif - -# ---------------------------------------------------------------------------- -# Source name -# ---------------------------------------------------------------------------- -%if %{undefined src_base} -%define src_base mysql -%endif -%define src_dir %{src_base}-%{mysql_version} - -# ---------------------------------------------------------------------------- -# Feature set (storage engines, options). 
Default to community (everything) -# ---------------------------------------------------------------------------- -%if %{undefined feature_set} -%define feature_set community -%endif - -# ---------------------------------------------------------------------------- -# Server comment strings -# ---------------------------------------------------------------------------- -%if %{undefined compilation_comment_debug} -%define compilation_comment_debug MySQL Community Server - Debug (GPL) -%endif -%if %{undefined compilation_comment_release} -%define compilation_comment_release MySQL Community Server (GPL) -%endif - -# ---------------------------------------------------------------------------- -# Product and server suffixes -# ---------------------------------------------------------------------------- -%if %{undefined product_suffix} - %if %{defined short_product_tag} - %define product_suffix -%{short_product_tag} - %else - %define product_suffix %{nil} - %endif -%endif - -%if %{undefined server_suffix} -%define server_suffix %{nil} -%endif - -# ---------------------------------------------------------------------------- -# Distribution support -# ---------------------------------------------------------------------------- -%if %{undefined distro_specific} -%define distro_specific 0 -%endif -%if %{distro_specific} - %if %(test -f /etc/oracle-release && echo 1 || echo 0) - %define elver %(rpm -qf --qf '%%{version}\\n' /etc/oracle-release | sed -e 's/^\\([0-9]*\\).*/\\1/g') - %if "%elver" == "6" - %define distro_description Oracle Linux 6 - %define distro_releasetag el6 - %define distro_buildreq gcc-c++ ncurses-devel perl readline-devel time zlib-devel - %define distro_requires chkconfig coreutils grep procps shadow-utils net-tools - %else - %{error:Oracle Linux %{elver} is unsupported} - %endif - %else - %if %(test -f /etc/redhat-release && echo 1 || echo 0) - %define rhelver %(rpm -qf --qf '%%{version}\\n' /etc/redhat-release | sed -e 's/^\\([0-9]*\\).*/\\1/g') - %if 
"%rhelver" == "5" - %define distro_description Red Hat Enterprise Linux 5 - %define distro_releasetag rhel5 - %define distro_buildreq gcc-c++ gperf ncurses-devel perl readline-devel time zlib-devel - %define distro_requires chkconfig coreutils grep procps shadow-utils net-tools - %else - %if "%rhelver" == "6" - %define distro_description Red Hat Enterprise Linux 6 - %define distro_releasetag rhel6 - %define distro_buildreq gcc-c++ ncurses-devel perl readline-devel time zlib-devel - %define distro_requires chkconfig coreutils grep procps shadow-utils net-tools - %else - %{error:Red Hat Enterprise Linux %{rhelver} is unsupported} - %endif - %endif - %else - %if %(test -f /etc/SuSE-release && echo 1 || echo 0) - %define susever %(rpm -qf --qf '%%{version}\\n' /etc/SuSE-release | cut -d. -f1) - %if "%susever" == "10" - %define distro_description SUSE Linux Enterprise Server 10 - %define distro_releasetag sles10 - %define distro_buildreq gcc-c++ gdbm-devel gperf ncurses-devel openldap2-client readline-devel zlib-devel - %define distro_requires aaa_base coreutils grep procps pwdutils - %else - %if "%susever" == "11" - %define distro_description SUSE Linux Enterprise Server 11 - %define distro_releasetag sles11 - %define distro_buildreq gcc-c++ gdbm-devel gperf ncurses-devel openldap2-client procps pwdutils readline-devel zlib-devel - %define distro_requires aaa_base coreutils grep procps pwdutils - %else - %{error:SuSE %{susever} is unsupported} - %endif - %endif - %else - %{error:Unsupported distribution} - %endif - %endif - %endif -%else - %define generic_kernel %(uname -r | cut -d. 
-f1-2) - %define distro_description Generic Linux (kernel %{generic_kernel}) - %define distro_releasetag linux%{generic_kernel} - %define distro_buildreq gcc-c++ gperf ncurses-devel perl readline-devel time zlib-devel - %define distro_requires coreutils grep procps /sbin/chkconfig /usr/sbin/useradd /usr/sbin/groupadd -%endif - -# Avoid debuginfo RPMs, leaves binaries unstripped -%define debug_package %{nil} - -# Hack to work around bug in RHEL5 __os_install_post macro, wrong inverted -# test for __debug_package -%define __strip /bin/true - -# ---------------------------------------------------------------------------- -# Support optional "tcmalloc" library (experimental) -# ---------------------------------------------------------------------------- -%if %{defined malloc_lib_target} -%define WITH_TCMALLOC 1 -%else -%define WITH_TCMALLOC 0 -%endif - -############################################################################## -# Configuration based upon above user input, not to be set directly -############################################################################## - -%if %{commercial} -%define license_files_server %{src_dir}/LICENSE.mysql -%define license_type Commercial -%else -%define license_files_server %{src_dir}/COPYING %{src_dir}/README -%define license_type GPL -%endif - -############################################################################## -# Main spec file section -############################################################################## - -Name: mysql%{product_suffix} -Summary: MySQL client programs and shared libraries -Group: Applications/Databases -Version: @MYSQL_RPM_VERSION@ -Release: %{release}%{?distro_releasetag:.%{distro_releasetag}} -# exceptions allow client libraries to be linked with most open source SW, -# not only GPL code. -License: Copyright (c) 2000, @MYSQL_COPYRIGHT_YEAR@, %{mysql_vendor}. All rights reserved. Under %{license_type} license as shown in the Description field. 
-URL: http://www.mysql.com/ -Packager: MySQL Release Engineering <mysql-build@oss.oracle.com> -Vendor: %{mysql_vendor} - -# Regression tests may take a long time, override the default to skip them -%{!?runselftest:%global runselftest 1} - -# Upstream has a mirror redirector for downloads, so the URL is hard to -# represent statically. You can get the tarball by following a link from -# http://dev.mysql.com/downloads/mysql/ -Source0: %{src_dir}.tar.gz -# The upstream tarball includes non-free documentation that only the -# copyright holder (MySQL -> Sun -> Oracle) may ship. -# To remove the non-free documentation, run this script after downloading -# the tarball into the current directory: -# ./generate-tarball.sh $VERSION -# Then, source name changes: -# Source0: mysql-%{version}-nodocs.tar.gz -%if %{commercial} -NoSource: 0 -%endif -Source1: generate-tarball.sh -Source2: mysql.init -Source3: my.cnf -Source4: scriptstub.c -Source5: my_config.h -# The below is only needed for packages built outside MySQL -> Sun -> Oracle: -Source6: README.mysql-docs -Source9: mysql-embedded-check.c -# Working around perl dependency checking bug in rpm FTTB. Remove later. 
-Source999: filter-requires-mysql.sh - -# Patch1: mysql-ssl-multilib.patch Not needed by MySQL (yaSSL), will not work in 5.5 (cmake) -Patch2: mysql-5.5-errno.patch -Patch4: mysql-5.5-testing.patch -Patch5: mysql-install-test.patch -Patch6: mysql-5.5-stack-guard.patch -# Patch7: mysql-disable-test.patch Already fixed in current 5.1 -# Patch8: mysql-setschedparam.patch Will not work in 5.5 (cmake) -# Patch9: mysql-no-docs.patch Will not work in 5.5 (cmake) -Patch10: mysql-strmov.patch - # Not used by MySQL -# Patch12: mysql-cve-2008-7247.patch Already fixed in 5.5 -Patch13: mysql-expired-certs.patch - # Will not be used by MySQL -# Patch14: mysql-missing-string-code.patch Undecided, will not work in 5.5 (cmake) -# Patch15: mysql-lowercase-bug.patch Fixed in MySQL 5.1.54 and 5.5.9 -Patch16: mysql-chain-certs.patch -Patch17: mysql-5.5-libdir.patch -Patch18: mysql-5.5-fix-tests.patch -Patch19: mysql-5.5-mtr1.patch - -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root -BuildRequires: %{distro_buildreq} -BuildRequires: gawk -# make test requires time and ps -BuildRequires: procps -# Socket and Time::HiRes are needed to run regression tests -BuildRequires: perl(Socket), perl(Time::HiRes) - -Requires: %{distro_requires} -Requires: fileutils -Requires: mysql-libs%{product_suffix} = %{version}-%{release} -Requires: bash - -# If %%{product_suffix} is non-empty, the auto-generated capability is insufficient: -# We want all dependency handling to use the generic name only. -# Similar in other sub-packages -Provides: mysql - -# MySQL (with caps) is upstream's spelling of their own RPMs for mysql -Obsoletes: MySQL -# mysql-cluster used to be built from this SRPM, but no more -Obsoletes: mysql-cluster < 5.1.44 -# We need cross-product "Obsoletes:" to allow cross-product upgrades: -Obsoletes: mysql < %{version}-%{release} -Obsoletes: mysql-advanced < %{version}-%{release} - -# Working around perl dependency checking bug in rpm FTTB. Remove later. 
-%global __perl_requires %{SOURCE999} - -%description -n mysql%{product_suffix} -MySQL is a multi-user, multi-threaded SQL database server. MySQL is a -client/server implementation consisting of a server daemon (mysqld) -and many different client programs and libraries. The base package -contains the standard MySQL client programs and generic MySQL files. - -The MySQL software has Dual Licensing, which means you can use the MySQL -software free of charge under the GNU General Public License -(http://www.gnu.org/licenses/). You can also purchase commercial MySQL -licenses from %{mysql_vendor} if you do not wish to be bound by the terms of -the GPL. See the chapter "Licensing and Support" in the manual for -further info. - -%package -n mysql-libs%{product_suffix} - -Summary: The shared libraries required for MySQL clients -Group: Applications/Databases -Requires: /sbin/ldconfig -Obsoletes: mysql-libs < %{version}-%{release} -Obsoletes: mysql-libs-advanced < %{version}-%{release} -Provides: mysql-libs - -%description -n mysql-libs%{product_suffix} -The mysql-libs package provides the essential shared libraries for any -MySQL client program or interface. You will need to install this package -to use any other MySQL package or any clients that need to connect to a -MySQL server. 
- -%package -n mysql-server%{product_suffix} - -Summary: The MySQL server and related files -Group: Applications/Databases -Requires: mysql%{product_suffix} = %{version}-%{release} -Requires: sh-utils -Requires(pre): /usr/sbin/useradd -Requires(post): chkconfig -Requires(preun): chkconfig -# This is for /sbin/service -Requires(preun): initscripts -Requires(postun): initscripts -# mysqlhotcopy needs DBI/DBD support -Requires: perl-DBI, perl-DBD-MySQL -Obsoletes: MySQL-server -Obsoletes: mysql-server < %{version}-%{release} -Obsoletes: mysql-server-advanced < %{version}-%{release} -Provides: mysql-server - -%description -n mysql-server%{product_suffix} -MySQL is a multi-user, multi-threaded SQL database server. MySQL is a -client/server implementation consisting of a server daemon (mysqld) -and many different client programs and libraries. This package contains -the MySQL server and some accompanying files and directories. - -%package -n mysql-devel%{product_suffix} - -Summary: Files for development of MySQL applications -Group: Applications/Databases -Requires: mysql%{product_suffix} = %{version}-%{release} -Requires: openssl-devel -Obsoletes: MySQL-devel -Obsoletes: mysql-devel < %{version}-%{release} -Obsoletes: mysql-devel-advanced < %{version}-%{release} -Provides: mysql-devel - -%description -n mysql-devel%{product_suffix} -MySQL is a multi-user, multi-threaded SQL database server. This -package contains the libraries and header files that are needed for -developing MySQL client applications. - -%package -n mysql-embedded%{product_suffix} - -Summary: MySQL as an embeddable library -Group: Applications/Databases -Obsoletes: mysql-embedded < %{version}-%{release} -Obsoletes: mysql-embedded-advanced < %{version}-%{release} -Provides: mysql-embedded - -%description -n mysql-embedded%{product_suffix} -MySQL is a multi-user, multi-threaded SQL database server. 
This -package contains a version of the MySQL server that can be embedded -into a client application instead of running as a separate process, -as well as a command line client with such an embedded server. - -%package -n mysql-embedded-devel%{product_suffix} - -Summary: Development files for MySQL as an embeddable library -Group: Applications/Databases -Requires: mysql-embedded%{product_suffix} = %{version}-%{release} -Requires: mysql-devel%{product_suffix} = %{version}-%{release} -Obsoletes: mysql-embedded-devel < %{version}-%{release} -Obsoletes: mysql-embedded-devel-advanced < %{version}-%{release} -Provides: mysql-embedded-devel - -%description -n mysql-embedded-devel%{product_suffix} -MySQL is a multi-user, multi-threaded SQL database server. This -package contains files needed for developing and testing with -the embedded version of the MySQL server. - -%package -n mysql-test%{product_suffix} - -Summary: The test suite distributed with MySQL -Group: Applications/Databases -Requires: mysql%{product_suffix} = %{version}-%{release} -Requires: mysql-server%{product_suffix} = %{version}-%{release} -Obsoletes: MySQL-test -Obsoletes: mysql-test < %{version}-%{release} -Obsoletes: mysql-test-advanced < %{version}-%{release} -Provides: mysql-test - -%description -n mysql-test%{product_suffix} -MySQL is a multi-user, multi-threaded SQL database server. This -package contains the regression test suite distributed with -the MySQL sources. 
- -%prep -%setup -T -a 0 -c -n %{src_dir} - -cd %{src_dir} # read about "%setup -n" -# %patch1 -p1 -%patch2 -p1 -# %patch4 -p1 TODO / FIXME: if wanted, needs to be adapted to new mysql-test-run setup -%patch5 -p1 -%patch6 -p1 -# %patch8 -p1 -# %patch9 -p1 -# %patch10 -p1 -# %patch13 -p1 -# %patch14 -p1 -%patch16 -p1 -%patch17 -p1 -%patch18 -p1 -%patch19 -p1 - -# workaround for upstream bug #56342 -rm -f mysql-test/t/ssl_8k_key-master.opt - -%build - -# Fail quickly and obviously if user tries to build as root -%if %runselftest - if [ x"`id -u`" = x0 ]; then - echo "The MySQL regression tests may fail if run as root." - echo "If you really need to build the RPM as root, use" - echo "--define='runselftest 0' to skip the regression tests." - exit 1 - fi -%endif - -# Be strict about variables, bail at earliest opportunity, etc. -set -eu - -# Optional package files -touch optional-files-devel - -# -# Set environment in order of preference, MYSQL_BUILD_* first, then variable -# name, finally a default. RPM_OPT_FLAGS is assumed to be a part of the -# default RPM build environment. -# -# We set CXX=gcc by default to support so-called 'generic' binaries, where we -# do not have a dependancy on libgcc/libstdc++. This only works while we do -# not require C++ features such as exceptions, and may need to be removed at -# a later date. -# - -# This is a hack, $RPM_OPT_FLAGS on ia64 hosts contains flags which break -# the compile in cmd-line-utils/readline - needs investigation, but for now -# we simply unset it and use those specified directly in cmake. -%if "%{_arch}" == "ia64" -RPM_OPT_FLAGS= -%endif - -# This goes in sync with Patch19. "rm" is faster than "patch" for this. 
-rm -rf %{src_dir}/mysql-test/lib/v1 - -export PATH=${MYSQL_BUILD_PATH:-$PATH} -export CC=${MYSQL_BUILD_CC:-${CC:-gcc}} -export CXX=${MYSQL_BUILD_CXX:-${CXX:-gcc}} -export CFLAGS=${MYSQL_BUILD_CFLAGS:-${CFLAGS:-$RPM_OPT_FLAGS}} -# Following "%ifarch" developed by RedHat, MySQL/Oracle does not support/maintain Linux/Sparc: -# gcc seems to have some bugs on sparc as of 4.4.1, back off optimization -# submitted as bz #529298 -%ifarch sparc sparcv9 sparc64 -CFLAGS=`echo $CFLAGS| sed -e "s|-O2|-O1|g" ` -%endif -export CXXFLAGS=${MYSQL_BUILD_CXXFLAGS:-${CXXFLAGS:-$RPM_OPT_FLAGS -felide-constructors -fno-exceptions -fno-rtti}} -export LDFLAGS=${MYSQL_BUILD_LDFLAGS:-${LDFLAGS:-}} -export CMAKE=${MYSQL_BUILD_CMAKE:-${CMAKE:-cmake}} -export MAKE_JFLAG=${MYSQL_BUILD_MAKE_JFLAG:-%{?_smp_mflags}} - -# Build debug mysqld and libmysqld.a -mkdir debug -( - cd debug - # Attempt to remove any optimisation flags from the debug build - CFLAGS=`echo " ${CFLAGS} " | \ - sed -e 's/ -O[0-9]* / /' \ - -e 's/ -unroll2 / /' \ - -e 's/ -ip / /' \ - -e 's/^ //' \ - -e 's/ $//'` - CXXFLAGS=`echo " ${CXXFLAGS} " | \ - sed -e 's/ -O[0-9]* / /' \ - -e 's/ -unroll2 / /' \ - -e 's/ -ip / /' \ - -e 's/^ //' \ - -e 's/ $//'` - # XXX: MYSQL_UNIX_ADDR should be in cmake/* but mysql_version is included before - # XXX: install_layout so we can't just set it based on INSTALL_LAYOUT=RPM - ${CMAKE} ../%{src_dir} -DBUILD_CONFIG=mysql_release -DINSTALL_LAYOUT=RPM \ - -DCMAKE_BUILD_TYPE=Debug \ - -DMYSQL_UNIX_ADDR="%{mysqldatadir}/mysql.sock" \ - -DFEATURE_SET="%{feature_set}" \ - -DCOMPILATION_COMMENT="%{compilation_comment_debug}" \ - -DMYSQL_SERVER_SUFFIX="%{server_suffix}" - echo BEGIN_DEBUG_CONFIG ; egrep '^#define' include/config.h ; echo END_DEBUG_CONFIG - make ${MAKE_JFLAG} VERBOSE=1 -) -# Build full release -mkdir release -( - cd release - # XXX: MYSQL_UNIX_ADDR should be in cmake/* but mysql_version is included before - # XXX: install_layout so we can't just set it based on INSTALL_LAYOUT=RPM - 
${CMAKE} ../%{src_dir} -DBUILD_CONFIG=mysql_release -DINSTALL_LAYOUT=RPM \ - -DCMAKE_BUILD_TYPE=RelWithDebInfo \ - -DMYSQL_UNIX_ADDR="%{mysqldatadir}/mysql.sock" \ - -DFEATURE_SET="%{feature_set}" \ - -DCOMPILATION_COMMENT="%{compilation_comment_release}" \ - -DMYSQL_SERVER_SUFFIX="%{server_suffix}" - echo BEGIN_NORMAL_CONFIG ; egrep '^#define' include/config.h ; echo END_NORMAL_CONFIG - make ${MAKE_JFLAG} VERBOSE=1 -) - -# TODO / FIXME: Do we need "scriptstub"? -gcc $CFLAGS $LDFLAGS -o scriptstub "-DLIBDIR=\"%{_libdir}/mysql\"" %{SOURCE4} - -# TODO / FIXME: "libmysqld.so" should have been produced above -# regular build will make libmysqld.a but not libmysqld.so :-( -cd release -mkdir libmysqld/work -cd libmysqld/work -# "libmysqld" provides the same ABI as "libmysqlclient", but it implements the server: -# The shared object is identified by the full version, -# for linkage selection the first two levels are sufficient so that upgrades are possible -# (see "man ld", option "-soname"). -SO_FULL='%{mysql_version}' -SO_USE=`echo $SO_FULL | sed -e 's/\([0-9]\.[0-9]\)\.[0-9]*/\1/'` -# These two modules should pull everything else which is needed: -ar -x ../libmysqld.a client.c.o signal_handler.cc.o -gcc $CFLAGS $LDFLAGS -shared -Wl,-soname,libmysqld.so.$SO_USE -o libmysqld.so.$SO_FULL \ - *.o ../libmysqld.a \ - -lpthread -lcrypt -laio -lnsl -lssl -lcrypto -lz -lrt -lstdc++ -lm -lc -# this is to check that we built a complete library -cp %{SOURCE9} . -PROGNAME=`basename %{SOURCE9} .c` -ln -s libmysqld.so.$SO_FULL libmysqld.so.$SO_USE -gcc -I../../include -I../../../%{src_dir}/include $CFLAGS -o $PROGNAME %{SOURCE9} libmysqld.so.$SO_USE -LD_LIBRARY_PATH=. ldd $PROGNAME -cd ../.. -cd .. - -# TODO / FIXME: autotools only? -# make check - -# TODO / FIXME: Test suite is run elsewhere in release builds - -# do we need this for users who want to build from source? 
-# Also, check whether MTR_BUILD_THREAD=auto would solve all issues -%if %runselftest - # hack to let 32- and 64-bit tests run concurrently on same build machine - case `uname -m` in - ppc64 | s390x | x86_64 | sparc64 ) - MTR_BUILD_THREAD=7 - ;; - *) - MTR_BUILD_THREAD=11 - ;; - esac - export MTR_BUILD_THREAD - - # if you want to change which tests are run, look at mysql-5.5-testing.patch too. - (cd release && make test-bt-fast ) -%endif - -%install -RBR=$RPM_BUILD_ROOT -MBD=$RPM_BUILD_DIR/%{src_dir} -[ -n "$RPM_BUILD_ROOT" -a "$RPM_BUILD_ROOT" != "/" ] && rm -rf $RPM_BUILD_ROOT - -# Ensure that needed directories exists -# TODO / FIXME: needed ? install -d $RBR%{mysqldatadir}/mysql -# TODO / FIXME: needed ? install -d $RBR%{_datadir}/mysql-test -# TODO / FIXME: needed ? install -d $RBR%{_datadir}/mysql/SELinux/RHEL4 -# TODO / FIXME: needed ? install -d $RBR%{_includedir} -# TODO / FIXME: needed ? install -d $RBR%{_libdir} -# TODO / FIXME: needed ? install -d $RBR%{_mandir} -# TODO / FIXME: needed ? install -d $RBR%{_sbindir} - -# Install all binaries -( - cd $MBD/release - make DESTDIR=$RBR install -) - -# For gcc builds, include libgcc.a in the devel subpackage (BUG 4921). Do -# this in a sub-shell to ensure we don't pollute the install environment -# with compiler bits. 
-( - PATH=${MYSQL_BUILD_PATH:-$PATH} - CC=${MYSQL_BUILD_CC:-${CC:-gcc}} - CFLAGS=${MYSQL_BUILD_CFLAGS:-${CFLAGS:-$RPM_OPT_FLAGS}} - if "${CC}" -v 2>&1 | grep '^gcc.version' >/dev/null 2>&1; then - libgcc=`${CC} ${CFLAGS} --print-libgcc-file` - if [ -f ${libgcc} ]; then - mkdir -p $RBR%{_libdir}/mysql - install -m 644 ${libgcc} $RBR%{_libdir}/mysql/libmygcc.a - echo "%{_libdir}/mysql/libmygcc.a" >>optional-files-devel - fi - fi -) - -# multilib header hacks -# we only apply this to known Red Hat multilib arches, per bug #181335 -case `uname -i` in - i386 | x86_64 | ppc | ppc64 | s390 | s390x | sparc | sparc64 ) - mv $RPM_BUILD_ROOT/usr/include/mysql/my_config.h $RPM_BUILD_ROOT/usr/include/mysql/my_config_`uname -i`.h - install -m 644 %{SOURCE5} $RPM_BUILD_ROOT/usr/include/mysql/ - ;; - *) - ;; -esac - -mkdir -p $RPM_BUILD_ROOT/var/log -touch $RPM_BUILD_ROOT/var/log/mysqld.log - -# List the installed tree for RPM package maintenance purposes. -find $RPM_BUILD_ROOT -print | sed "s|^$RPM_BUILD_ROOT||" | sort > ROOTFILES - -mkdir -p $RPM_BUILD_ROOT/etc/rc.d/init.d -mkdir -p $RPM_BUILD_ROOT/var/run/mysqld -install -m 0755 -d $RPM_BUILD_ROOT/var/lib/mysql -install -m 0755 %{SOURCE2} $RPM_BUILD_ROOT/etc/rc.d/init.d/mysqld -install -m 0644 %{SOURCE3} $RPM_BUILD_ROOT/etc/my.cnf -# obsolete: mv $RPM_BUILD_ROOT/usr/sql-bench $RPM_BUILD_ROOT%{_datadir}/sql-bench # 'sql-bench' is dropped -# obsolete: mv $RPM_BUILD_ROOT/usr/mysql-test $RPM_BUILD_ROOT%{_datadir}/mysql-test # 'mysql-test' is there already -# 5.1.32 forgets to install the mysql-test README file -# obsolete: install -m 0644 mysql-test/README $RPM_BUILD_ROOT%{_datadir}/mysql-test/README # 'README' is there already - -mv ${RPM_BUILD_ROOT}%{_bindir}/mysqlbug ${RPM_BUILD_ROOT}%{_libdir}/mysql/mysqlbug -install -m 0755 scriptstub ${RPM_BUILD_ROOT}%{_bindir}/mysqlbug -mv ${RPM_BUILD_ROOT}%{_bindir}/mysql_config ${RPM_BUILD_ROOT}%{_libdir}/mysql/mysql_config -install -m 0755 scriptstub 
${RPM_BUILD_ROOT}%{_bindir}/mysql_config - -rm -f ${RPM_BUILD_ROOT}%{_libdir}/mysql/libmysqld.a -SO_FULL='%{mysql_version}' -SO_USE=`echo $SO_FULL | sed -e 's/\([0-9]\.[0-9]\)\.[0-9]*/\1/'` -install -m 0755 release/libmysqld/work/libmysqld.so.$SO_FULL ${RPM_BUILD_ROOT}%{_libdir}/mysql/libmysqld.so.$SO_FULL -ln -s libmysqld.so.$SO_FULL ${RPM_BUILD_ROOT}%{_libdir}/mysql/libmysqld.so.$SO_USE -ln -s libmysqld.so.$SO_USE ${RPM_BUILD_ROOT}%{_libdir}/mysql/libmysqld.so - -rm -f ${RPM_BUILD_ROOT}%{_bindir}/comp_err -rm -f ${RPM_BUILD_ROOT}%{_mandir}/man1/comp_err.1* -rm -f ${RPM_BUILD_ROOT}%{_bindir}/make_win_binary_distribution -rm -f ${RPM_BUILD_ROOT}%{_bindir}/make_win_src_distribution -rm -f ${RPM_BUILD_ROOT}%{_mandir}/man1/make_win_bin_dist.1* -rm -f ${RPM_BUILD_ROOT}%{_mandir}/man1/make_win_src_distribution.1* -rm -f ${RPM_BUILD_ROOT}%{_libdir}/mysql/libmysqlclient*.la -rm -f ${RPM_BUILD_ROOT}%{_libdir}/mysql/*.a -rm -f ${RPM_BUILD_ROOT}%{_libdir}/mysql/plugin/*.la -rm -f ${RPM_BUILD_ROOT}%{_libdir}/mysql/plugin/*.a -rm -f ${RPM_BUILD_ROOT}%{_datadir}/mysql/binary-configure -rm -f ${RPM_BUILD_ROOT}%{_datadir}/mysql/make_binary_distribution -rm -f ${RPM_BUILD_ROOT}%{_datadir}/mysql/make_sharedlib_distribution -rm -f ${RPM_BUILD_ROOT}%{_datadir}/mysql/mi_test_all* -rm -f ${RPM_BUILD_ROOT}%{_datadir}/mysql/ndb-config-2-node.ini -rm -f ${RPM_BUILD_ROOT}%{_datadir}/mysql/mysql.server -rm -f ${RPM_BUILD_ROOT}%{_datadir}/mysql/mysqld_multi.server -rm -f ${RPM_BUILD_ROOT}%{_datadir}/mysql/MySQL-shared-compat.spec -rm -f ${RPM_BUILD_ROOT}%{_datadir}/mysql/*.plist -rm -f ${RPM_BUILD_ROOT}%{_datadir}/mysql/preinstall -rm -f ${RPM_BUILD_ROOT}%{_datadir}/mysql/postinstall -rm -f ${RPM_BUILD_ROOT}%{_datadir}/mysql/mysql-*.spec -rm -f ${RPM_BUILD_ROOT}%{_datadir}/mysql/mysql-log-rotate -rm -f ${RPM_BUILD_ROOT}%{_datadir}/mysql/ChangeLog -rm -f ${RPM_BUILD_ROOT}%{_mandir}/man1/mysql-stress-test.pl.1* -rm -f ${RPM_BUILD_ROOT}%{_mandir}/man1/mysql-test-run.pl.1* -rm -rf 
${RPM_BUILD_ROOT}%{_datadir}/mysql/solaris - -mkdir -p $RPM_BUILD_ROOT/etc/ld.so.conf.d -echo "%{_libdir}/mysql" > $RPM_BUILD_ROOT/etc/ld.so.conf.d/%{name}-%{_arch}.conf - -# The below *only* applies to builds not done by MySQL / Sun / Oracle: -# copy additional docs into build tree so %%doc will find them -# cp %{SOURCE6} README.mysql-docs - -%clean -[ -n "$RPM_BUILD_ROOT" -a "$RPM_BUILD_ROOT" != "/" ] && rm -rf $RPM_BUILD_ROOT - -%pre -n mysql-server%{product_suffix} - -# Check if we can safely upgrade. An upgrade is only safe if it's from one -# of our RPMs in the same version family. - -# Handle both ways of spelling the capability. -installed=`rpm -q --whatprovides mysql-server 2> /dev/null` -if [ $? -ne 0 -o -z "$installed" ]; then - installed=`rpm -q --whatprovides MySQL-server 2> /dev/null` -fi -if [ $? -eq 0 -a -n "$installed" ]; then - installed=`echo $installed | sed 's/\([^ ]*\) .*/\1/'` # Tests have shown duplicated package names - vendor=`rpm -q --queryformat='%{VENDOR}' "$installed" 2>&1` - version=`rpm -q --queryformat='%{VERSION}' "$installed" 2>&1` - myvendor='%{mysql_vendor}' - myversion='%{mysql_version}' - - old_family=`echo $version \ - | sed -n -e 's,^\([1-9][0-9]*\.[0-9][0-9]*\)\..*$,\1,p'` - new_family=`echo $myversion \ - | sed -n -e 's,^\([1-9][0-9]*\.[0-9][0-9]*\)\..*$,\1,p'` - - [ -z "$vendor" ] && vendor='<unknown>' - [ -z "$old_family" ] && old_family="<unrecognized version $version>" - [ -z "$new_family" ] && new_family="<bad package specification: version $myversion>" - - error_text= - if [ "$vendor" != "$myvendor" ]; then - error_text="$error_text -The current MySQL server package is provided by a different -vendor ($vendor) than $myvendor. -Some files may be installed to different locations, including log -files and the service startup script in %{_sysconfdir}/init.d/. 
-" - fi - - if [ "$old_family" != "$new_family" ]; then - error_text="$error_text -Upgrading directly from MySQL $old_family to MySQL $new_family may not -be safe in all cases. A manual dump and restore using mysqldump is -recommended. It is important to review the MySQL manual's Upgrading -section for version-specific incompatibilities. -" - fi - - if [ -n "$error_text" ]; then - cat <<HERE >&2 - -****************************************************************** -A MySQL server package ($installed) is installed. -$error_text -A manual upgrade is required. - -- Ensure that you have a complete, working backup of your data and my.cnf - files -- Shut down the MySQL server cleanly -- Remove the existing MySQL packages. Usually this command will - list the packages you should remove: - rpm -qa | grep -i '^mysql-' - - You may choose to use 'rpm --nodeps -ev <package-name>' to remove - the package which contains the mysqlclient shared library. The - library will be reinstalled by the MySQL-shared-compat package. -- Install the new MySQL packages supplied by $myvendor -- Ensure that the MySQL server is started -- Run the 'mysql_upgrade' program - -This is a brief description of the upgrade process. Important details -can be found in the MySQL manual, in the Upgrading section. 
-****************************************************************** -HERE - exit 1 - fi -fi - -/usr/sbin/groupadd -g 27 -o -r mysql >/dev/null 2>&1 || : -/usr/sbin/useradd -M -N -g mysql -o -r -d /var/lib/mysql -s /bin/bash \ - -c "MySQL Server" -u 27 mysql >/dev/null 2>&1 || : - -%post -n mysql-libs%{product_suffix} -/sbin/ldconfig - -%post -n mysql-server%{product_suffix} -if [ $1 = 1 ]; then - /sbin/chkconfig --add mysqld -fi -/bin/chmod 0755 /var/lib/mysql -/bin/touch /var/log/mysqld.log - -%preun -n mysql-server%{product_suffix} -if [ $1 = 0 ]; then - /sbin/service mysqld stop >/dev/null 2>&1 - /sbin/chkconfig --del mysqld -fi - -%postun -n mysql-libs%{product_suffix} -if [ $1 = 0 ] ; then - /sbin/ldconfig -fi - -%postun -n mysql-server%{product_suffix} -if [ $1 -ge 1 ]; then - /sbin/service mysqld condrestart >/dev/null 2>&1 || : -fi - - -%files -n mysql%{product_suffix} -%defattr(-,root,root) -%doc %{license_files_server} - -# The below file *only* applies to builds not done by MySQL / Sun / Oracle: -# %doc README.mysql-docs - -%{_bindir}/msql2mysql -%{_bindir}/mysql -%{_bindir}/mysql_config -%{_bindir}/mysql_find_rows -%{_bindir}/mysql_waitpid -%{_bindir}/mysqlaccess -%{_bindir}/mysqlaccess.conf -%{_bindir}/mysqladmin -%{_bindir}/mysqlbinlog -%{_bindir}/mysqlcheck -%{_bindir}/mysqldump -%{_bindir}/mysqlimport -%{_bindir}/mysqlshow -%{_bindir}/mysqlslap -%{_bindir}/my_print_defaults - -%{_mandir}/man1/mysql.1* -%{_mandir}/man1/mysql_config.1* -%{_mandir}/man1/mysql_find_rows.1* -%{_mandir}/man1/mysql_waitpid.1* -%{_mandir}/man1/mysqlaccess.1* -%{_mandir}/man1/mysqladmin.1* -%{_mandir}/man1/mysqldump.1* -%{_mandir}/man1/mysqlshow.1* -%{_mandir}/man1/mysqlslap.1* -%{_mandir}/man1/my_print_defaults.1* - -%{_libdir}/mysql/mysqlbug -%{_libdir}/mysql/mysql_config - -%files -n mysql-libs%{product_suffix} -%defattr(-,root,root) -%doc %{license_files_server} -# although the default my.cnf contains only server settings, we put it in the -# libs package because it can 
be used for client settings too. -%config(noreplace) /etc/my.cnf -%dir %{_libdir}/mysql -%{_libdir}/mysql/libmysqlclient*.so.* -/etc/ld.so.conf.d/* - -%dir %{_datadir}/mysql -%{_datadir}/mysql/english -%lang(cs) %{_datadir}/mysql/czech -%lang(da) %{_datadir}/mysql/danish -%lang(nl) %{_datadir}/mysql/dutch -%lang(et) %{_datadir}/mysql/estonian -%lang(fr) %{_datadir}/mysql/french -%lang(de) %{_datadir}/mysql/german -%lang(el) %{_datadir}/mysql/greek -%lang(hu) %{_datadir}/mysql/hungarian -%lang(it) %{_datadir}/mysql/italian -%lang(ja) %{_datadir}/mysql/japanese -%lang(ko) %{_datadir}/mysql/korean -%lang(no) %{_datadir}/mysql/norwegian -%lang(no) %{_datadir}/mysql/norwegian-ny -%lang(pl) %{_datadir}/mysql/polish -%lang(pt) %{_datadir}/mysql/portuguese -%lang(ro) %{_datadir}/mysql/romanian -%lang(ru) %{_datadir}/mysql/russian -%lang(sr) %{_datadir}/mysql/serbian -%lang(sk) %{_datadir}/mysql/slovak -%lang(es) %{_datadir}/mysql/spanish -%lang(sv) %{_datadir}/mysql/swedish -%lang(uk) %{_datadir}/mysql/ukrainian -%{_datadir}/mysql/charsets - -%files -n mysql-server%{product_suffix} -f release/support-files/plugins.files -%defattr(-,root,root) -%doc release/support-files/*.cnf -%if 0%{?commercial} - %doc %{_datadir}/info/mysql.info* -%endif -%doc %{src_dir}/Docs/ChangeLog -%doc %{src_dir}/Docs/INFO_SRC* -%doc release/Docs/INFO_BIN* - -%{_bindir}/myisamchk -%{_bindir}/myisam_ftdump -%{_bindir}/myisamlog -%{_bindir}/myisampack -%{_bindir}/mysql_convert_table_format -%{_bindir}/mysql_fix_extensions -%{_bindir}/mysql_install_db -%{_bindir}/mysql_plugin -%{_bindir}/mysql_secure_installation -%if %{commercial} -%else -%{_bindir}/mysql_setpermission -%endif -%{_bindir}/mysql_tzinfo_to_sql -%{_bindir}/mysql_upgrade -%{_bindir}/mysql_zap -%{_bindir}/mysqlbug -%{_bindir}/mysqldumpslow -%{_bindir}/mysqld_multi -%{_bindir}/mysqld_safe -%{_bindir}/mysqlhotcopy -%{_bindir}/mysqltest -%{_bindir}/innochecksum -%{_bindir}/perror -%{_bindir}/replace -%{_bindir}/resolve_stack_dump 
-%{_bindir}/resolveip - -/usr/libexec/mysqld -/usr/libexec/mysqld-debug -%{_libdir}/mysql/plugin/daemon_example.ini - -%if %{WITH_TCMALLOC} -%{_libdir}/mysql/%{malloc_lib_target} -%endif - -# obsolete by "-f release/support-files/plugins.files" above -# %{_libdir}/mysql/plugin - -%{_mandir}/man1/msql2mysql.1* -%{_mandir}/man1/myisamchk.1* -%{_mandir}/man1/myisamlog.1* -%{_mandir}/man1/myisampack.1* -%{_mandir}/man1/mysql_convert_table_format.1* -%{_mandir}/man1/myisam_ftdump.1* -%{_mandir}/man1/mysql.server.1* -%{_mandir}/man1/mysql_fix_extensions.1* -%{_mandir}/man1/mysql_install_db.1* -%{_mandir}/man1/mysql_plugin.1* -%{_mandir}/man1/mysql_secure_installation.1* -%{_mandir}/man1/mysql_upgrade.1* -%{_mandir}/man1/mysql_zap.1* -%{_mandir}/man1/mysqlbug.1* -%{_mandir}/man1/mysqldumpslow.1* -%{_mandir}/man1/mysqlbinlog.1* -%{_mandir}/man1/mysqlcheck.1* -%{_mandir}/man1/mysqld_multi.1* -%{_mandir}/man1/mysqld_safe.1* -%{_mandir}/man1/mysqlhotcopy.1* -%{_mandir}/man1/mysqlimport.1* -%{_mandir}/man1/mysqlman.1* -%if %{commercial} -%else -%{_mandir}/man1/mysql_setpermission.1* -%endif -%{_mandir}/man1/mysqltest.1* -%{_mandir}/man1/innochecksum.1* -%{_mandir}/man1/perror.1* -%{_mandir}/man1/replace.1* -%{_mandir}/man1/resolve_stack_dump.1* -%{_mandir}/man1/resolveip.1* -%{_mandir}/man1/mysql_tzinfo_to_sql.1* -%{_mandir}/man8/mysqld.8* - -%{_datadir}/mysql/errmsg-utf8.txt -%{_datadir}/mysql/fill_help_tables.sql -%{_datadir}/mysql/magic -%{_datadir}/mysql/mysql_system_tables.sql -%{_datadir}/mysql/mysql_system_tables_data.sql -%{_datadir}/mysql/mysql_test_data_timezone.sql -%{_datadir}/mysql/my-*.cnf -%{_datadir}/mysql/config.*.ini - -/etc/rc.d/init.d/mysqld -%attr(0755,mysql,mysql) %dir /var/run/mysqld -%attr(0755,mysql,mysql) %dir /var/lib/mysql -%attr(0640,mysql,mysql) %config(noreplace) %verify(not md5 size mtime) /var/log/mysqld.log - -# TODO / FIXME: Do we need "libmygcc.a"? If yes, append "-f optional-files-devel" -# and fix the "rm -f" list in the "install" section. 
-%files -n mysql-devel%{product_suffix} -%defattr(-,root,root) -/usr/include/mysql -/usr/share/aclocal/mysql.m4 -%{_libdir}/mysql/libmysqlclient*.so - -%files -n mysql-embedded%{product_suffix} -%defattr(-,root,root) -%doc %{license_files_server} -%{_libdir}/mysql/libmysqld.so.* -%{_bindir}/mysql_embedded - -%files -n mysql-embedded-devel%{product_suffix} -%defattr(-,root,root) -%{_libdir}/mysql/libmysqld.so -%{_bindir}/mysql_client_test_embedded -%{_bindir}/mysqltest_embedded -%{_mandir}/man1/mysql_client_test_embedded.1* -%{_mandir}/man1/mysqltest_embedded.1* - -%files -n mysql-test%{product_suffix} -%defattr(-,root,root) -%{_bindir}/mysql_client_test -%attr(-,mysql,mysql) %{_datadir}/mysql-test - -%{_mandir}/man1/mysql_client_test.1* - -%changelog -* Tue Nov 05 2013 Balasubramanian Kandasamy <balasubramanian.kandasamy@oracle.com> -- Removed non gpl file mysql.info from community packages - -* Wed Jul 10 2013 Balasubramanian Kandasamy <balasubramanian.kandasamy@oracle.com> -- Removed directory /usr/share/mysql/solaris/postinstall-solaris to resolve build - error - -* Thu Dec 7 2012 Joerg Bruehe <joerg.bruehe@oracle.com> -- Change the way in which "libmysqld.so" is created: Using all object modules - was wrong, gcc / ld can resolve the dependencies from "libmysqld.a". - Also, identify the ".so" version from the MySQL version, "0.0.1" was wrong. - Bug#15972480 - -* Tue Sep 18 2012 Joerg Bruehe <joerg.bruehe@oracle.com> -- Restrict the vendor check to Oracle: There is no history here - which we have to allow for. - -* Thu Jul 26 2012 Joerg Bruehe <joerg.bruehe@oracle.com> -- Add the vendor and release series checks from the traditional MySQL RPM - spec file, to protect against errors happening during upgrades. -- Do some code alignment with the traditional MySQL RPM spec file, - to make synchronous maintenance (and possibly even integration?) easier. 
- -* Mon Feb 13 2012 Joerg Bruehe <joerg.bruehe@oracle.com> -- Add "Provides:" lines for the generic names of the subpackages, - independent of "product_suffix". - -* Tue Feb 7 2012 Joerg Bruehe <joerg.bruehe@oracle.com> -- Make "mysql_setpermission" and its man page appear in GPL builds only. - -* Thu Nov 24 2011 Joerg Bruehe <joerg.bruehe@oracle.com> -- Add two patches (#18 + #19) regarding the test suite; - version 1 of "mysql-test-run.pl" had to go because the auto-detection - of Perl dependencies does not handle differences between run directory - and delivery location. - -* Thu Nov 3 2011 Joerg Bruehe <joerg.bruehe@oracle.com> -- Adapt from MySQL 5.1 to 5.5, tested using 5.5.17: - - Done by the MySQL Build Team at Oracle: - set as packager, set copyright owner and related info; - - handle command line options, allowing different configurations, platforms, ... - - configurations will show up in the file name as "product_suffix", - - use "-n" for all subpackage specifications, - - license may be GPL or commercial, mention that in the description, - the license output and the included license files will vary, - - commercial is "nosource", - - improve "requires" listings for different platforms, - - explicitly use "product_suffix" in the "requires" entries; - - adapt to 5.5 changes in features and function: - - remove "mysql-bench" package (files are outdated, not maintained), - - no InnoDB plugin, - - the set of plugins will vary by configuration, to control the "server" - package contents use "-f release/support-files/plugins.files" in the - "files" section, - - remove "mysqlmanager", "mysql_fix_privilege_tables", - - add "mysql_embedded", "mysql-plugin", "mysqlaccess.conf", "magic", - - "errmsg.txt" is now in UTF8: "errmsg-utf8.txt", - - adapt patches to changed code where needed, rename these to include "5.5", - - stop using patches which are not applicable to 5.5; - - 5.5 uses a different way of building: - - autotools are replaced by cmake, - - both a 
"release" and a "debug" server are built in separate subtrees - ("out of source"!), this also affects path names in further handling, - - the debug server is added to the "server" subpackage, - - add "mysql-5.5-libdir.patch" to handle file placement at user site. - -* Mon Dec 20 2010 Tom Lane <tgl@redhat.com> 5.1.52-1.1 -- Update to MySQL 5.1.52, for various fixes described at - http://dev.mysql.com/doc/refman/5.1/en/news-5-1-52.html - including numerous small security issues -Resolves: #652553 -- Sync with current Fedora package; this includes: -- Duplicate COPYING and EXCEPTIONS-CLIENT in -libs and -embedded subpackages, - to ensure they are available when any subset of mysql RPMs are installed, - per revised packaging guidelines -- Allow init script's STARTTIMEOUT/STOPTIMEOUT to be overridden from sysconfig - -* Thu Jul 15 2010 Tom Lane <tgl@redhat.com> 5.1.47-4 -- Add backported patch for CVE-2010-2008 (upstream bug 53804) -Resolves: #614215 -- Add BuildRequires perl(Time::HiRes) ... seems to no longer be installed - by just pulling in perl. - -* Mon Jun 28 2010 Tom Lane <tgl@redhat.com> 5.1.47-3 -- Add -p "$mypidfile" to initscript's status call to improve corner cases. - (Note: can't be fixed in Fedora until 595597 is fixed there.) 
-Resolves: #596008 - -* Mon Jun 7 2010 Tom Lane <tgl@redhat.com> 5.1.47-2 -- Add back "partition" storage engine -Resolves: #598585 -- Fix broken "federated" storage engine plugin -Resolves: #587170 -- Read all certificates in SSL certificate files, to support chained certs -Resolves: #598656 - -* Mon May 24 2010 Tom Lane <tgl@redhat.com> 5.1.47-1 -- Update to MySQL 5.1.47, for various fixes described at - http://dev.mysql.com/doc/refman/5.1/en/news-5-1-47.html - http://dev.mysql.com/doc/refman/5.1/en/news-5-1-46.html - http://dev.mysql.com/doc/refman/5.1/en/news-5-1-45.html - including fixes for CVE-2010-1621, CVE-2010-1626, - CVE-2010-1848, CVE-2010-1849, CVE-2010-1850 -Resolves: #590598 -- Create mysql group explicitly in pre-server script, to ensure correct GID - -* Mon Mar 8 2010 Tom Lane <tgl@redhat.com> 5.1.44-2 -- Update to MySQL 5.1.44, for various fixes described at - http://dev.mysql.com/doc/refman/5.1/en/news-5-1-44.html -Resolves: #565554 -- Remove mysql.info, which is not freely redistributable -Related: #560181 -- Revert broken upstream fix for their bug 45058 -Related: #566547 -- Bring init script into some modicum of compliance with Fedora/LSB standards -Resolves: #557711 -Resolves: #562749 - -* Mon Feb 15 2010 Tom Lane <tgl@redhat.com> 5.1.43-2 -- Update to MySQL 5.1.43, for various fixes described at - http://dev.mysql.com/doc/refman/5.1/en/news-5-1-43.html -Resolves: #565554 -- Remove mysql-cluster, which is no longer supported by upstream in this - source distribution. If we want it we'll need a separate SRPM for it. 
-Resolves: #565210 - -* Fri Jan 29 2010 Tom Lane <tgl@redhat.com> 5.1.42-7 -- Add backported patch for CVE-2008-7247 (upstream bug 39277) -Resolves: #549329 -- Use non-expired certificates for SSL testing (upstream bug 50702) - -* Tue Jan 26 2010 Tom Lane <tgl@redhat.com> 5.1.42-6 -- Emit explicit error message if user tries to build RPM as root -Resolves: #558915 - -* Wed Jan 20 2010 Tom Lane <tgl@redhat.com> 5.1.42-5 -- Correct Source0: tag and comment to reflect how to get the tarball - -* Fri Jan 8 2010 Tom Lane <tgl@redhat.com> 5.1.42-4 -- Sync with current Fedora build, including: -- Update to MySQL 5.1.42, for various fixes described at - http://dev.mysql.com/doc/refman/5.1/en/news-5-1-42.html -- Disable symbolic links by default in /etc/my.cnf -Resolves: #553653 -- Remove static libraries (.a files) from package, per packaging guidelines -- Change %%define to %%global, per packaging guidelines -- Disable building the innodb plugin; it tickles assorted gcc bugs and - doesn't seem entirely ready for prime time anyway. 
-Resolves: #553632 -- Start mysqld_safe with --basedir=/usr, to avoid unwanted SELinux messages - (see 547485) -- Stop waiting during "service mysqld start" if mysqld_safe exits -Resolves: #544095 - -* Mon Nov 23 2009 Tom Lane <tgl@redhat.com> 5.1.41-1 -- Update to MySQL 5.1.41, for various fixes described at - http://dev.mysql.com/doc/refman/5.1/en/news-5-1-41.html - including fixes for CVE-2009-4019 -Resolves: #549327 -- Don't set old_passwords=1; we aren't being bug-compatible with 3.23 anymore -Resolves: #540735 - -* Tue Nov 10 2009 Tom Lane <tgl@redhat.com> 5.1.40-1 -- Update to MySQL 5.1.40, for various fixes described at - http://dev.mysql.com/doc/refman/5.1/en/news-5-1-40.html -- Do not force the --log-error setting in mysqld init script -Resolves: #533736 - -* Sat Oct 17 2009 Tom Lane <tgl@redhat.com> 5.1.39-4 -- Replace kluge fix for ndbd sparc crash with a real fix (mysql bug 48132) - -* Thu Oct 15 2009 Tom Lane <tgl@redhat.com> 5.1.39-3 -- Work around two different compiler bugs on sparc, one by backing off - optimization from -O2 to -O1, and the other with a klugy patch -Related: #529298, #529299 -- Clean up bogosity in multilib stub header support: ia64 should not be - listed (it's not multilib), sparc and sparc64 should be - -* Wed Sep 23 2009 Tom Lane <tgl@redhat.com> 5.1.39-2 -- Work around upstream bug 46895 by disabling outfile_loaddata test - -* Tue Sep 22 2009 Tom Lane <tgl@redhat.com> 5.1.39-1 -- Update to MySQL 5.1.39, for various fixes described at - http://dev.mysql.com/doc/refman/5.1/en/news-5-1-39.html - -* Mon Aug 31 2009 Tom Lane <tgl@redhat.com> 5.1.37-5 -- Work around unportable assumptions about stpcpy(); re-enable main.mysql test -- Clean up some obsolete parameters to the configure script - -* Sat Aug 29 2009 Tom Lane <tgl@redhat.com> 5.1.37-4 -- Remove one misguided patch; turns out I was chasing a glibc bug -- Temporarily disable "main.mysql" test; there's something broken there too, - but we need to get mysql built in rawhide 
for dependency reasons - -* Fri Aug 21 2009 Tomas Mraz <tmraz@redhat.com> - 5.1.37-3 -- rebuilt with new openssl - -* Fri Aug 14 2009 Tom Lane <tgl@redhat.com> 5.1.37-2 -- Add a couple of patches to improve the probability of the regression tests - completing in koji builds - -* Sun Aug 2 2009 Tom Lane <tgl@redhat.com> 5.1.37-1 -- Update to MySQL 5.1.37, for various fixes described at - http://dev.mysql.com/doc/refman/5.1/en/news-5-1-37.html - -* Sat Jul 25 2009 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 5.1.36-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_12_Mass_Rebuild - -* Fri Jul 10 2009 Tom Lane <tgl@redhat.com> 5.1.36-1 -- Update to MySQL 5.1.36, for various fixes described at - http://dev.mysql.com/doc/refman/5.1/en/news-5-1-36.html - -* Sat Jun 6 2009 Tom Lane <tgl@redhat.com> 5.1.35-1 -- Update to MySQL 5.1.35, for various fixes described at - http://dev.mysql.com/doc/refman/5.1/en/news-5-1-35.html -- Ensure that /var/lib/mysql is created with the right SELinux context -Resolves: #502966 - -* Fri May 15 2009 Tom Lane <tgl@redhat.com> 5.1.34-1 -- Update to MySQL 5.1.34, for various fixes described at - http://dev.mysql.com/doc/refman/5.1/en/news-5-1-34.html -- Increase startup timeout per bug #472222 - -* Wed Apr 15 2009 Tom Lane <tgl@redhat.com> 5.1.33-2 -- Increase stack size of ndbd threads for safety's sake. -Related: #494631 - -* Tue Apr 7 2009 Tom Lane <tgl@redhat.com> 5.1.33-1 -- Update to MySQL 5.1.33. -- Disable use of pthread_setschedparam; doesn't work the way code expects. -Related: #477624 - -* Wed Mar 4 2009 Tom Lane <tgl@redhat.com> 5.1.32-1 -- Update to MySQL 5.1.32. - -* Wed Feb 25 2009 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 5.1.31-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_11_Mass_Rebuild - -* Fri Feb 13 2009 Tom Lane <tgl@redhat.com> 5.1.31-1 -- Update to MySQL 5.1.31. 
- -* Thu Jan 22 2009 Tom Lane <tgl@redhat.com> 5.1.30-2 -- hm, apparently --with-innodb and --with-ndbcluster are still needed - even though no longer documented ... - -* Thu Jan 22 2009 Tom Lane <tgl@redhat.com> 5.1.30-1 -- Update to MySQL 5.1.30. Note that this includes an ABI break for - libmysqlclient (it's now got .so major version 16). -- This also updates mysql for new openssl build - -* Wed Oct 1 2008 Tom Lane <tgl@redhat.com> 5.0.67-2 -- Build the "embedded server" library, and package it in a new sub-RPM - mysql-embedded, along with mysql-embedded-devel for devel support files. -Resolves: #149829 - -* Sat Aug 23 2008 Tom Lane <tgl@redhat.com> 5.0.67-1 -- Update to mysql version 5.0.67 -- Move mysql_config's man page to base package, again (apparently I synced - that change the wrong way while importing specfile changes for ndbcluster) - -* Sun Jul 27 2008 Tom Lane <tgl@redhat.com> 5.0.51a-2 -- Enable ndbcluster support -Resolves: #163758 -- Suppress odd crash messages during package build, caused by trying to - build dbug manual (which we don't install anyway) with dbug disabled -Resolves: #437053 -- Improve mysql.init to pass configured datadir to mysql_install_db, - and to force user=mysql for both mysql_install_db and mysqld_safe. -Related: #450178 - -* Mon Mar 3 2008 Tom Lane <tgl@redhat.com> 5.0.51a-1 -- Update to mysql version 5.0.51a - -* Mon Mar 3 2008 Tom Lane <tgl@redhat.com> 5.0.45-11 -- Fix mysql-stack-guard patch to work correctly on IA64 -- Fix mysql.init to wait correctly when socket is not in default place -Related: #435494 - -* Mon Mar 03 2008 Dennis Gilmore <dennis@ausil.us> 5.0.45-10 -- add sparc64 to 64 bit arches for test suite checking -- add sparc, sparcv9 and sparc64 to multilib handling - -* Thu Feb 28 2008 Tom Lane <tgl@redhat.com> 5.0.45-9 -- Fix the stack overflow problem encountered in January. 
It seems the real -issue is that the buildfarm machines were moved to RHEL5, which uses 64K not -4K pages on PPC, and because RHEL5 takes the guard area out of the requested -thread stack size we no longer had enough headroom. -Related: #435337 - -* Tue Feb 19 2008 Fedora Release Engineering <rel-eng@fedoraproject.org> - 5.0.45-8 -- Autorebuild for GCC 4.3 - -* Tue Jan 8 2008 Tom Lane <tgl@redhat.com> 5.0.45-7 -- Unbelievable ... upstream still thinks that it's a good idea to have a - regression test that is guaranteed to begin failing come January 1. -- ... and it seems we need to raise STACK_MIN_SIZE again too. - -* Thu Dec 13 2007 Tom Lane <tgl@redhat.com> 5.0.45-6 -- Back-port upstream fixes for CVE-2007-5925, CVE-2007-5969, CVE-2007-6303. -Related: #422211 - -* Wed Dec 5 2007 Tom Lane <tgl@redhat.com> 5.0.45-5 -- Rebuild for new openssl - -* Sat Aug 25 2007 Tom Lane <tgl@redhat.com> 5.0.45-4 -- Seems we need explicit BuildRequires on gawk and procps now -- Rebuild to fix Fedora toolchain issues - -* Sun Aug 12 2007 Tom Lane <tgl@redhat.com> 5.0.45-3 -- Recent perl changes in rawhide mean we need a more specific BuildRequires - -* Thu Aug 2 2007 Tom Lane <tgl@redhat.com> 5.0.45-2 -- Update License tag to match code. -- Work around recent Fedora change that makes "open" a macro name. 
- -* Sun Jul 22 2007 Tom Lane <tgl@redhat.com> 5.0.45-1 -- Update to MySQL 5.0.45 -Resolves: #246535 -- Move mysql_config's man page to base package -Resolves: #245770 -- move my_print_defaults to base RPM, for consistency with Stacks packaging -- mysql user is no longer deleted at RPM uninstall -Resolves: #241912 - -* Thu Mar 29 2007 Tom Lane <tgl@redhat.com> 5.0.37-2 -- Use a less hacky method of getting default values in initscript -Related: #233771, #194596 -- Improve packaging of mysql-libs per suggestions from Remi Collet -Resolves: #233731 -- Update default /etc/my.cnf ([mysql.server] has been bogus for a long time) - -* Mon Mar 12 2007 Tom Lane <tgl@redhat.com> 5.0.37-1 -- Update to MySQL 5.0.37 -Resolves: #231838 -- Put client library into a separate mysql-libs RPM to reduce dependencies -Resolves: #205630 - -* Fri Feb 9 2007 Tom Lane <tgl@redhat.com> 5.0.33-1 -- Update to MySQL 5.0.33 -- Install band-aid fix for "view" regression test designed to fail after 2006 -- Don't chmod -R the entire database directory tree on every startup -Related: #221085 -- Fix unsafe use of install-info -Resolves: #223713 -- Cope with new automake in F7 -Resolves: #224171 - -* Thu Nov 9 2006 Tom Lane <tgl@redhat.com> 5.0.27-1 -- Update to MySQL 5.0.27 (see CVE-2006-4031, CVE-2006-4226, CVE-2006-4227) -Resolves: #202247, #202675, #203427, #203428, #203432, #203434, #208641 -- Fix init script to return status 1 on server start timeout -Resolves: #203910 -- Move mysqldumpslow from base package to mysql-server -Resolves: #193559 -- Adjust link options for BDB module -Resolves: #199368 - -* Wed Jul 12 2006 Jesse Keating <jkeating@redhat.com> - 5.0.22-2.1 -- rebuild - -* Sat Jun 10 2006 Tom Lane <tgl@redhat.com> 5.0.22-2 -- Work around brew's tendency not to clean up failed builds completely, - by adding code in mysql-testing.patch to kill leftover mysql daemons. 
- -* Thu Jun 8 2006 Tom Lane <tgl@redhat.com> 5.0.22-1 -- Update to MySQL 5.0.22 (fixes CVE-2006-2753) -- Install temporary workaround for gcc bug on s390x (bz #193912) - -* Tue May 2 2006 Tom Lane <tgl@redhat.com> 5.0.21-2 -- Fix bogus perl Requires for mysql-test - -* Mon May 1 2006 Tom Lane <tgl@redhat.com> 5.0.21-1 -- Update to MySQL 5.0.21 - -* Mon Mar 27 2006 Tom Lane <tgl@redhat.com> 5.0.18-4 -- Modify multilib header hack to not break non-RH arches, per bug #181335 -- Remove logrotate script, per bug #180639. -- Add a new mysql-test RPM to carry the regression test files; - hack up test scripts as needed to make them run in /usr/share/mysql-test. - -* Fri Feb 10 2006 Jesse Keating <jkeating@redhat.com> - 5.0.18-2.1 -- bump again for double-long bug on ppc(64) - -* Thu Feb 9 2006 Tom Lane <tgl@redhat.com> 5.0.18-2 -- err-log option has been renamed to log-error, fix my.cnf and initscript - -* Tue Feb 07 2006 Jesse Keating <jkeating@redhat.com> - 5.0.18-1.1 -- rebuilt for new gcc4.1 snapshot and glibc changes - -* Thu Jan 5 2006 Tom Lane <tgl@redhat.com> 5.0.18-1 -- Update to MySQL 5.0.18 - -* Thu Dec 15 2005 Tom Lane <tgl@redhat.com> 5.0.16-4 -- fix my_config.h for ppc platforms - -* Thu Dec 15 2005 Tom Lane <tgl@redhat.com> 5.0.16-3 -- my_config.h needs to guard against 64-bit platforms that also define the - 32-bit symbol - -* Wed Dec 14 2005 Tom Lane <tgl@redhat.com> 5.0.16-2 -- oops, looks like we want uname -i not uname -m - -* Mon Dec 12 2005 Tom Lane <tgl@redhat.com> 5.0.16-1 -- Update to MySQL 5.0.16 -- Add EXCEPTIONS-CLIENT license info to the shipped documentation -- Make my_config.h architecture-independent for multilib installs; - put the original my_config.h into my_config_$ARCH.h -- Add -fwrapv to CFLAGS so that gcc 4.1 doesn't break it - -* Fri Dec 09 2005 Jesse Keating <jkeating@redhat.com> -- rebuilt - -* Mon Nov 14 2005 Tom Lane <tgl@redhat.com> 5.0.15-3 -- Make stop script wait for daemon process to disappear (bz#172426) - -* Wed Nov 9 
2005 Tom Lane <tgl@redhat.com> 5.0.15-2 -- Rebuild due to openssl library update. - -* Thu Nov 3 2005 Tom Lane <tgl@redhat.com> 5.0.15-1 -- Update to MySQL 5.0.15 (scratch build for now) - -* Wed Oct 5 2005 Tom Lane <tgl@redhat.com> 4.1.14-1 -- Update to MySQL 4.1.14 - -* Tue Aug 23 2005 Tom Lane <tgl@redhat.com> 4.1.12-3 -- Use politically correct patch name. - -* Tue Jul 12 2005 Tom Lane <tgl@redhat.com> 4.1.12-2 -- Fix buffer overflow newly exposed in isam code; it's the same issue - previously found in myisam, and not very exciting, but I'm tired of - seeing build warnings. - -* Mon Jul 11 2005 Tom Lane <tgl@redhat.com> 4.1.12-1 -- Update to MySQL 4.1.12 (includes a fix for bz#158688, bz#158689) -- Extend mysql-test-ssl.patch to solve rpl_openssl test failure (bz#155850) -- Update mysql-lock-ssl.patch to match the upstream committed version -- Add --with-isam to re-enable the old ISAM table type, per bz#159262 -- Add dependency on openssl-devel per bz#159569 -- Remove manual.txt, as upstream decided not to ship it anymore; - it was redundant with the mysql.info file anyway. - -* Mon May 9 2005 Tom Lane <tgl@redhat.com> 4.1.11-4 -- Include proper locking for OpenSSL in the server, per bz#155850 - -* Mon Apr 25 2005 Tom Lane <tgl@redhat.com> 4.1.11-3 -- Enable openssl tests during build, per bz#155850 -- Might as well turn on --disable-dependency-tracking - -* Fri Apr 8 2005 Tom Lane <tgl@redhat.com> 4.1.11-2 -- Avoid dependency on <asm/atomic.h>, cause it won't build anymore on ia64. - This is probably a cleaner solution for bz#143537, too. 
- -* Thu Apr 7 2005 Tom Lane <tgl@redhat.com> 4.1.11-1 -- Update to MySQL 4.1.11 to fix bz#152911 as well as other issues -- Move perl-DBI, perl-DBD-MySQL dependencies to server package (bz#154123) -- Override configure thread library test to suppress HAVE_LINUXTHREADS check -- Fix BDB failure on s390x (bz#143537) -- At last we can enable "make test" on all arches - -* Fri Mar 11 2005 Tom Lane <tgl@redhat.com> 4.1.10a-1 -- Update to MySQL 4.1.10a to fix security vulnerabilities (bz#150868, - for CAN-2005-0711, and bz#150871 for CAN-2005-0709, CAN-2005-0710). - -* Sun Mar 6 2005 Tom Lane <tgl@redhat.com> 4.1.10-3 -- Fix package Requires: interdependencies. - -* Sat Mar 5 2005 Tom Lane <tgl@redhat.com> 4.1.10-2 -- Need -fno-strict-aliasing in at least one place, probably more. -- Work around some C spec violations in mysql. - -* Fri Feb 18 2005 Tom Lane <tgl@redhat.com> 4.1.10-1 -- Update to MySQL 4.1.10. - -* Sat Jan 15 2005 Tom Lane <tgl@redhat.com> 4.1.9-1 -- Update to MySQL 4.1.9. - -* Wed Jan 12 2005 Tom Lane <tgl@redhat.com> 4.1.7-10 -- Don't assume /etc/my.cnf will specify pid-file (bz#143724) - -* Wed Jan 12 2005 Tim Waugh <twaugh@redhat.com> 4.1.7-9 -- Rebuilt for new readline. 
- -* Tue Dec 21 2004 Tom Lane <tgl@redhat.com> 4.1.7-8 -- Run make test on all archs except s390x (which seems to have a bdb issue) - -* Mon Dec 13 2004 Tom Lane <tgl@redhat.com> 4.1.7-7 -- Suppress someone's silly idea that libtool overhead can be skipped - -* Sun Dec 12 2004 Tom Lane <tgl@redhat.com> 4.1.7-6 -- Fix init script to not need a valid username for startup check (bz#142328) -- Fix init script to honor settings appearing in /etc/my.cnf (bz#76051) -- Enable SSL (bz#142032) - -* Thu Dec 2 2004 Tom Lane <tgl@redhat.com> 4.1.7-5 -- Add a restorecon to keep the mysql.log file in the right context (bz#143887) - -* Tue Nov 23 2004 Tom Lane <tgl@redhat.com> 4.1.7-4 -- Turn off old_passwords in default /etc/my.cnf file, for better compatibility - with mysql 3.x clients (per suggestion from Joe Orton). - -* Fri Oct 29 2004 Tom Lane <tgl@redhat.com> 4.1.7-3 -- Handle ldconfig more cleanly (put a file in /etc/ld.so.conf.d/). - -* Thu Oct 28 2004 Tom Lane <tgl@redhat.com> 4.1.7-2 -- rebuild in devel branch - -* Wed Oct 27 2004 Tom Lane <tgl@redhat.com> 4.1.7-1 -- Update to MySQL 4.1.x. 
- -* Tue Oct 12 2004 Tom Lane <tgl@redhat.com> 3.23.58-13 -- fix security issues CAN-2004-0835, CAN-2004-0836, CAN-2004-0837 - (bugs #135372, 135375, 135387) -- fix privilege escalation on GRANT ALL ON `Foo\_Bar` (CAN-2004-0957) - -* Wed Oct 06 2004 Tom Lane <tgl@redhat.com> 3.23.58-12 -- fix multilib problem with mysqlbug and mysql_config -- adjust chkconfig priority per bug #128852 -- remove bogus quoting per bug #129409 (MySQL 4.0 has done likewise) -- add sleep to mysql.init restart(); may or may not fix bug #133993 - -* Tue Oct 05 2004 Tom Lane <tgl@redhat.com> 3.23.58-11 -- fix low-priority security issues CAN-2004-0388, CAN-2004-0381, CAN-2004-0457 - (bugs #119442, 125991, 130347, 130348) -- fix bug with dropping databases under recent kernels (bug #124352) - -* Tue Jun 15 2004 Elliot Lee <sopwith@redhat.com> 3.23.58-10 -- rebuilt - -* Sat Apr 17 2004 Warren Togami <wtogami@redhat.com> 3.23.58-9 -- remove redundant INSTALL-SOURCE, manual.* -- compress manual.txt.bz2 -- BR time - -* Tue Mar 16 2004 Tom Lane <tgl@redhat.com> 3.23.58-8 -- repair logfile attributes in %%files, per bug #102190 -- repair quoting problem in mysqlhotcopy, per bug #112693 -- repair missing flush in mysql_setpermission, per bug #113960 -- repair broken error message printf, per bug #115165 -- delete mysql user during uninstall, per bug #117017 -- rebuilt - -* Tue Mar 02 2004 Elliot Lee <sopwith@redhat.com> -- rebuilt - -* Tue Feb 24 2004 Tom Lane <tgl@redhat.com> -- fix chown syntax in mysql.init -- rebuild - -* Fri Feb 13 2004 Elliot Lee <sopwith@redhat.com> -- rebuilt - -* Tue Nov 18 2003 Kim Ho <kho@redhat.com> 3.23.58-5 -- update mysql.init to use anonymous user (UNKNOWN_MYSQL_USER) for - pinging mysql server (#108779) - -* Mon Oct 27 2003 Kim Ho <kho@redhat.com> 3.23.58-4 -- update mysql.init to wait (max 10 seconds) for mysql server to - start (#58732) - -* Mon Oct 27 2003 Patrick Macdonald <patrickm@redhat.com> 3.23.58-3 -- re-enable Berkeley DB support (#106832) -- re-enable 
ia64 testing - -* Fri Sep 19 2003 Patrick Macdonald <patrickm@redhat.com> 3.23.58-2 -- rebuilt - -* Mon Sep 15 2003 Patrick Macdonald <patrickm@redhat.com> 3.23.58-1 -- upgrade to 3.23.58 for security fix - -* Tue Aug 26 2003 Patrick Macdonald <patrickm@redhat.com> 3.23.57-2 -- rebuilt - -* Wed Jul 02 2003 Patrick Macdonald <patrickm@redhat.com> 3.23.57-1 -- revert to prior version of MySQL due to license incompatibilities - with packages that link against the client. The MySQL folks are - looking into the issue. - -* Wed Jun 18 2003 Patrick Macdonald <patrickm@redhat.com> 4.0.13-4 -- restrict test on ia64 (temporary) - -* Wed Jun 04 2003 Elliot Lee <sopwith@redhat.com> 4.0.13-3 -- rebuilt - -* Thu May 29 2003 Patrick Macdonald <patrickm@redhat.com> 4.0.13-2 -- fix filter-requires-mysql.sh with less restrictive for mysql-bench - -* Wed May 28 2003 Patrick Macdonald <patrickm@redhat.com> 4.0.13-1 -- update for MySQL 4.0 -- back-level shared libraries available in mysqlclient10 package - -* Fri May 09 2003 Patrick Macdonald <patrickm@redhat.com> 3.23.56-2 -- add sql-bench package (#90110) - -* Wed Mar 19 2003 Patrick Macdonald <patrickm@redhat.com> 3.23.56-1 -- upgrade to 3.23.56 for security fixes -- remove patch for double-free (included in 3.23.56) - -* Tue Feb 18 2003 Patrick Macdonald <patrickm@redhat.com> 3.23.54a-11 -- enable thread safe client -- add patch for double free fix - -* Wed Jan 22 2003 Tim Powers <timp@redhat.com> -- rebuilt - -* Mon Jan 13 2003 Karsten Hopp <karsten@redhat.de> 3.23.54a-9 -- disable checks on s390x - -* Sat Jan 4 2003 Jeff Johnson <jbj@redhat.com> 3.23.54a-8 -- use internal dep generator. 
- -* Wed Jan 1 2003 Bill Nottingham <notting@redhat.com> 3.23.54a-7 -- fix mysql_config on hammer - -* Sun Dec 22 2002 Tim Powers <timp@redhat.com> 3.23.54a-6 -- don't use rpms internal dep generator - -* Tue Dec 17 2002 Elliot Lee <sopwith@redhat.com> 3.23.54a-5 -- Push it into the build system - -* Mon Dec 16 2002 Joe Orton <jorton@redhat.com> 3.23.54a-4 -- upgrade to 3.23.54a for safe_mysqld fix - -* Thu Dec 12 2002 Joe Orton <jorton@redhat.com> 3.23.54-3 -- upgrade to 3.23.54 for latest security fixes - -* Tue Nov 19 2002 Jakub Jelinek <jakub@redhat.com> 3.23.52-5 -- Always include <errno.h> for errno -- Remove unpackaged files - -* Tue Nov 12 2002 Florian La Roche <Florian.LaRoche@redhat.de> -- do not prereq userdel, not used at all - -* Mon Sep 9 2002 Trond Eivind Glomsrd <teg@redhat.com> 3.23.52-4 -- Use %%{_libdir} -- Add patch for x86-64 - -* Wed Sep 4 2002 Jakub Jelinek <jakub@redhat.com> 3.23.52-3 -- rebuilt with gcc-3.2-7 - -* Thu Aug 29 2002 Trond Eivind Glomsrd <teg@redhat.com> 3.23.52-2 -- Add --enable-local-infile to configure - a new option - which doesn't default to the old behaviour (#72885) - -* Fri Aug 23 2002 Trond Eivind Glomsrd <teg@redhat.com> 3.23.52-1 -- 3.23.52. Fixes a minor security problem, various bugfixes. - -* Sat Aug 10 2002 Elliot Lee <sopwith@redhat.com> 3.23.51-5 -- rebuilt with gcc-3.2 (we hope) - -* Mon Jul 22 2002 Trond Eivind Glomsrd <teg@redhat.com> 3.23.51-4 -- rebuild - -* Thu Jul 18 2002 Trond Eivind Glomsrd <teg@redhat.com> 3.23.51-3 -- Fix #63543 and #63542 - -* Thu Jul 11 2002 Trond Eivind Glomsrd <teg@redhat.com> 3.23.51-2 -- Turn off bdb on PPC(#68591) -- Turn off the assembly optimizations, for safety. 
- -* Wed Jun 26 2002 Trond Eivind Glomsrd <teg@redhat.com> 3.23.51-1 -- Work around annoying auto* thinking this is a crosscompile -- 3.23.51 - -* Fri Jun 21 2002 Tim Powers <timp@redhat.com> -- automated rebuild - -* Mon Jun 10 2002 Trond Eivind Glomsrd <teg@redhat.com> 3.23.50-2 -- Add dependency on perl-DBI and perl-DBD-MySQL (#66349) - -* Thu May 30 2002 Trond Eivind Glomsrd <teg@redhat.com> 3.23.50-1 -- 3.23.50 - -* Thu May 23 2002 Tim Powers <timp@redhat.com> -- automated rebuild - -* Mon May 13 2002 Trond Eivind Glomsrd <teg@redhat.com> 3.23.49-4 -- Rebuild -- Don't set CXX to gcc, it doesn't work anymore -- Exclude Alpha - -* Mon Apr 8 2002 Trond Eivind Glomsrd <teg@redhat.com> 3.23.49-3 -- Add the various .cnf examples as doc files to mysql-server (#60349) -- Don't include manual.ps, it's just 200 bytes with a URL inside (#60349) -- Don't include random files in /usr/share/mysql (#60349) -- langify (#60349) - -* Thu Feb 21 2002 Trond Eivind Glomsrd <teg@redhat.com> 3.23.49-2 -- Rebuild - -* Sun Feb 17 2002 Trond Eivind Glomsrd <teg@redhat.com> 3.23.49-1 -- 3.23.49 - -* Thu Feb 14 2002 Trond Eivind Glomsrd <teg@redhat.com> 3.23.48-2 -- work around perl dependency bug. - -* Mon Feb 11 2002 Trond Eivind Glomsrd <teg@redhat.com> 3.23.48-1 -- 3.23.48 - -* Thu Jan 17 2002 Trond Eivind Glomsrd <teg@redhat.com> 3.23.47-4 -- Use kill, not mysqladmin, to flush logs and shut down. Thus, - an admin password can be set with no problems. -- Remove reload from init script - -* Wed Jan 16 2002 Trond Eivind Glomsrd <teg@redhat.com> 3.23.47-3 -- remove db3-devel from buildrequires, - MySQL has had its own bundled copy since the mid thirties - -* Sun Jan 6 2002 Trond Eivind Glomsrd <teg@redhat.com> 3.23.47-1 -- 3.23.47 -- Don't build for alpha, toolchain immature. - -* Mon Dec 3 2001 Trond Eivind Glomsrd <teg@redhat.com> 3.23.46-1 -- 3.23.46 -- use -fno-rtti and -fno-exceptions, and set CXX to increase stability. - Recommended by mysql developers. 
- -* Sun Nov 25 2001 Trond Eivind Glomsrd <teg@redhat.com> 3.23.45-1 -- 3.23.45 - -* Wed Nov 14 2001 Trond Eivind Glomsrd <teg@redhat.com> 3.23.44-2 -- centralize definition of datadir in the initscript (#55873) - -* Fri Nov 2 2001 Trond Eivind Glomsrd <teg@redhat.com> 3.23.44-1 -- 3.23.44 - -* Thu Oct 4 2001 Trond Eivind Glomsrd <teg@redhat.com> 3.23.43-1 -- 3.23.43 - -* Mon Sep 10 2001 Trond Eivind Glomsrd <teg@redhat.com> 3.23.42-1 -- 3.23.42 -- reenable innodb - -* Tue Aug 14 2001 Trond Eivind Glomsrd <teg@redhat.com> 3.23.41-1 -- 3.23.41 bugfix release -- disable innodb, to avoid the broken updates -- Use "mysqladmin flush_logs" instead of kill -HUP in logrotate - script (#51711) - -* Sat Jul 21 2001 Trond Eivind Glomsrd <teg@redhat.com> -- 3.23.40, bugfix release -- Add zlib-devel to buildrequires: - -* Fri Jul 20 2001 Trond Eivind Glomsrd <teg@redhat.com> -- BuildRequires-tweaking - -* Thu Jun 28 2001 Trond Eivind Glomsrd <teg@redhat.com> -- Reenable test, but don't run them for s390, s390x or ia64 -- Make /etc/my.cnf config(noplace). Same for /etc/logrotate.d/mysqld - -* Thu Jun 14 2001 Trond Eivind Glomsrd <teg@redhat.com> -- 3.23.29 -- enable innodb -- enable assembly again -- disable tests for now... 
- -* Tue May 15 2001 Trond Eivind Glomsrd <teg@redhat.com> -- 3.23.38 -- Don't use BDB on Alpha - no fast mutexes - -* Tue Apr 24 2001 Trond Eivind Glomsrd <teg@redhat.com> -- 3.23.37 -- Add _GNU_SOURCE to the compile flags - -* Wed Mar 28 2001 Trond Eivind Glomsrd <teg@redhat.com> -- Make it obsolete our 6.2 PowerTools packages -- 3.23.36 bugfix release - fixes some security issues - which didn't apply to our standard configuration -- Make "make test" part of the build process, except on IA64 - (it fails there) - -* Tue Mar 20 2001 Trond Eivind Glomsrd <teg@redhat.com> -- 3.23.35 bugfix release -- Don't delete the mysql user on uninstall - -* Tue Mar 13 2001 Trond Eivind Glomsrd <teg@redhat.com> -- 3.23.34a bugfix release - -* Wed Feb 7 2001 Trond Eivind Glomsrd <teg@redhat.com> -- added readline-devel to BuildRequires: - -* Tue Feb 6 2001 Trond Eivind Glomsrd <teg@redhat.com> -- small i18n-fixes to initscript (action needs $) - -* Tue Jan 30 2001 Trond Eivind Glomsrd <teg@redhat.com> -- make it shut down and rotate logs without using mysqladmin - (from #24909) - -* Mon Jan 29 2001 Trond Eivind Glomsrd <teg@redhat.com> -- conflict with "MySQL" - -* Tue Jan 23 2001 Trond Eivind Glomsrd <teg@redhat.com> -- improve gettextizing - -* Mon Jan 22 2001 Trond Eivind Glomsrd <teg@redhat.com> -- 3.23.32 -- fix logrotate script (#24589) - -* Wed Jan 17 2001 Trond Eivind Glomsrd <teg@redhat.com> -- gettextize -- move the items in Requires(post): to Requires: in preparation - for an errata for 7.0 when 3.23.31 is released -- 3.23.31 - -* Tue Jan 16 2001 Trond Eivind Glomsrd <teg@redhat.com> -- add the log file to the rpm database, and make it 0640 - (#24116) -- as above in logrotate script -- changes to the init sequence - put most of the data - in /etc/my.cnf instead of hardcoding in the init script -- use /var/run/mysqld/mysqld.pid instead of - /var/run/mysqld/pid -- use standard safe_mysqld -- shut down cleaner - -* Mon Jan 08 2001 Trond Eivind Glomsrd <teg@redhat.com> -- 
3.23.30 -- do an explicit chmod on /var/lib/mysql in post, to avoid - any problems with broken permissons. There is a report - of rm not changing this on its own (#22989) - -* Mon Jan 01 2001 Trond Eivind Glomsrd <teg@redhat.com> -- bzipped source -- changed from 85 to 78 in startup, so it starts before - apache (which can use modules requiring mysql) - -* Wed Dec 27 2000 Trond Eivind Glomsrd <teg@redhat.com> -- 3.23.29a - -* Tue Dec 19 2000 Trond Eivind Glomsrd <teg@redhat.com> -- add requirement for new libstdc++, build for errata - -* Mon Dec 18 2000 Trond Eivind Glomsrd <teg@redhat.com> -- 3.23.29 - -* Mon Nov 27 2000 Trond Eivind Glomsrd <teg@redhat.com> -- 3.23.28 (gamma) -- remove old patches, as they are now upstreamed - -* Thu Nov 14 2000 Trond Eivind Glomsrd <teg@redhat.com> -- Add a requirement for a new glibc (#20735) -- build on IA64 - -* Wed Nov 1 2000 Trond Eivind Glomsrd <teg@redhat.com> -- disable more assembly - -* Wed Nov 1 2000 Jakub Jelinek <jakub@redhat.com> -- fix mysql on SPARC (#20124) - -* Tue Oct 31 2000 Trond Eivind Glomsrd <teg@redhat.com> -- 3.23.27 - -* Wed Oct 25 2000 Trond Eivind Glomsrd <teg@redhat.com> -- add patch for fixing bogus aliasing in mysql from Jakub, - which should fix #18905 and #18620 - -* Mon Oct 23 2000 Trond Eivind Glomsrd <teg@redhat.com> -- check for negative niceness values, and negate it - if present (#17899) -- redefine optflags on IA32 FTTB - -* Wed Oct 18 2000 Trond Eivind Glomsrd <teg@redhat.com> -- 3.23.26, which among other fixes now uses mkstemp() - instead of tempnam(). -- revert changes made yesterday, the problem is now - isolated - -* Tue Oct 17 2000 Trond Eivind Glomsrd <teg@redhat.com> -- use the compat C++ compiler FTTB. Argh. 
-- add requirement of ncurses4 (see above) - -* Sun Oct 01 2000 Trond Eivind Glomsrd <teg@redhat.com> -- 3.23.25 -- fix shutdown problem (#17956) - -* Tue Sep 26 2000 Trond Eivind Glomsrd <teg@redhat.com> -- Don't try to include no-longer-existing PUBLIC file - as doc (#17532) - -* Thu Sep 12 2000 Trond Eivind Glomsrd <teg@redhat.com> -- rename config file to /etc/my.cnf, which is what - mysqld wants... doh. (#17432) -- include a changed safe_mysqld, so the pid file option - works. -- make mysql dir world readable to they can access the - mysql socket. (#17432) -- 3.23.24 - -* Wed Sep 06 2000 Trond Eivind Glomsrd <teg@redhat.com> -- 3.23.23 - -* Sun Aug 27 2000 Trond Eivind Glomsrd <teg@redhat.com> -- Add "|| :" to condrestart to avoid non-zero exit code - -* Thu Aug 24 2000 Trond Eivind Glomsrd <teg@redhat.com> -- it's mysql.com, not mysql.org and use correct path to - source (#16830) - -* Wed Aug 16 2000 Trond Eivind Glomsrd <teg@redhat.com> -- source file from /etc/rc.d, not /etc/rd.d. Doh. - -* Sun Aug 13 2000 Trond Eivind Glomsrd <teg@redhat.com> -- don't run ldconfig -n, it doesn't update ld.so.cache - (#16034) -- include some missing binaries -- use safe_mysqld to start the server (request from - mysql developers) - -* Sat Aug 05 2000 Bill Nottingham <notting@redhat.com> -- condrestart fixes - -* Mon Aug 01 2000 Trond Eivind Glomsrd <teg@redhat.com> -- 3.23.22. Disable the old patches, they're now in. 
- -* Thu Jul 27 2000 Trond Eivind Glomsrd <teg@redhat.com> -- bugfixes in the initscript -- move the .so link to the devel package - -* Wed Jul 19 2000 Trond Eivind Glomsrd <teg@redhat.com> -- rebuild due to glibc changes - -* Tue Jul 18 2000 Trond Eivind Glomsrd <teg@redhat.com> -- disable compiler patch -- don't include info directory file - -* Mon Jul 17 2000 Trond Eivind Glomsrd <teg@redhat.com> -- move back to /etc/rc.d/init.d - -* Fri Jul 14 2000 Trond Eivind Glomsrd <teg@redhat.com> -- more cleanups in initscript - -* Thu Jul 13 2000 Trond Eivind Glomsrd <teg@redhat.com> -- add a patch to work around compiler bug - (from monty@mysql.com) - -* Wed Jul 12 2000 Trond Eivind Glomsrd <teg@redhat.com> -- don't build the SQL daemon statically (glibc problems) -- fix the logrotate script - only flush log if mysql - is running -- change the reloading procedure -- remove icon - glint is obsolete a long time ago - -* Wed Jul 12 2000 Prospector <bugzilla@redhat.com> -- automatic rebuild - -* Mon Jul 10 2000 Trond Eivind Glomsrd <teg@redhat.com> -- try the new compiler again -- build the SQL daemon statically -- add compile time support for complex charsets -- enable assembler -- more cleanups in initscript - -* Sun Jul 09 2000 Trond Eivind Glomsrd <teg@redhat.com> -- use old C++ compiler -- Exclusivearch x86 - -* Sat Jul 08 2000 Trond Eivind Glomsrd <teg@redhat.com> -- move .so files to devel package -- more cleanups -- exclude sparc for now - -* Wed Jul 05 2000 Trond Eivind Glomsrd <teg@redhat.com> -- 3.23.21 -- remove file from /etc/sysconfig -- Fix initscript a bit - initialization of databases doesn't - work yet -- specify the correct licenses -- include a /etc/my.conf (empty, FTTB) -- add conditional restart to spec file - -* Tue Jul 2 2000 Jakub Jelinek <jakub@redhat.com> -- Rebuild with new C++ - -* Fri Jun 30 2000 Trond Eivind Glomsrd <teg@redhat.com> -- update to 3.23.20 -- use %%configure, %%makeinstall, %%{_tmppath}, %%{_mandir}, - %%{_infodir}, /etc/init.d 
-- remove the bench package -- change some of the descriptions a little bit -- fix the init script -- some compile fixes -- specify mysql user -- use mysql uid 27 (postgresql is 26) -- don't build on ia64 - -* Sat Feb 26 2000 Jos Vos <jos@xos.nl> -- Version 3.22.32 release XOS.1 for LinuX/OS 1.8.0 -- Upgrade from version 3.22.27 to 3.22.32. -- Do "make install" instead of "make install-strip", because "install -s" - now appears to fail on various scripts. Afterwards, strip manually. -- Reorganize subpackages, according to common Red Hat packages: the client - program and shared library become the base package and the server and - some accompanying files are now in a separate server package. The - server package implicitly requires the base package (shared library), - but we have added a manual require tag anyway (because of the shared - config file, and more). -- Rename the mysql-benchmark subpackage to mysql-bench. - -* Mon Jan 31 2000 Jos Vos <jos@xos.nl> -- Version 3.22.27 release XOS.2 for LinuX/OS 1.7.1 -- Add post(un)install scripts for updating ld.so.conf (client subpackage). - -* Sun Nov 21 1999 Jos Vos <jos@xos.nl> -- Version 3.22.27 release XOS.1 for LinuX/OS 1.7.0 -- Initial version. -- Some ideas borrowed from Red Hat Powertools 6.1, although this spec - file is a full rewrite from scratch. 
diff --git a/packaging/rpm-uln/scriptstub.c b/packaging/rpm-uln/scriptstub.c deleted file mode 100644 index de942c136e7..00000000000 --- a/packaging/rpm-uln/scriptstub.c +++ /dev/null @@ -1,32 +0,0 @@ -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include <unistd.h> - -/* Translate call of myself into call of same-named script in LIBDIR */ -/* The macro LIBDIR must be defined as a double-quoted string */ - -int main (int argc, char **argv) -{ - char *basename; - char *fullname; - char **newargs; - int i; - - basename = strrchr(argv[0], '/'); - if (basename) - basename++; - else - basename = argv[0]; - fullname = malloc(strlen(LIBDIR) + strlen(basename) + 2); - sprintf(fullname, "%s/%s", LIBDIR, basename); - newargs = malloc((argc+1) * sizeof(char *)); - newargs[0] = fullname; - for (i = 1; i < argc; i++) - newargs[i] = argv[i]; - newargs[argc] = NULL; - - execvp(fullname, newargs); - - return 1; -} diff --git a/plugin/metadata_lock_info/metadata_lock_info.cc b/plugin/metadata_lock_info/metadata_lock_info.cc index 2a2d5e58e2b..b45ea012617 100644 --- a/plugin/metadata_lock_info/metadata_lock_info.cc +++ b/plugin/metadata_lock_info/metadata_lock_info.cc @@ -163,7 +163,7 @@ maria_declare_plugin(metadata_lock_info) NULL, NULL, NULL, - MariaDB_PLUGIN_MATURITY_BETA, + MariaDB_PLUGIN_MATURITY_GAMMA, } maria_declare_plugin_end; #else diff --git a/plugin/query_response_time/query_response_time.cc b/plugin/query_response_time/query_response_time.cc index b28962a5db4..47d42f6fc88 100644 --- a/plugin/query_response_time/query_response_time.cc +++ b/plugin/query_response_time/query_response_time.cc @@ -227,7 +227,7 @@ public: char time[TIME_STRING_BUFFER_LENGTH]; char total[TOTAL_STRING_BUFFER_LENGTH]; if(i == bound_count()) - { + { assert(sizeof(TIME_OVERFLOW) <= TIME_STRING_BUFFER_LENGTH); assert(sizeof(TIME_OVERFLOW) <= TOTAL_STRING_BUFFER_LENGTH); memcpy(time,TIME_OVERFLOW,sizeof(TIME_OVERFLOW)); diff --git a/plugin/semisync/semisync_master_plugin.cc 
b/plugin/semisync/semisync_master_plugin.cc index b8240c80ef3..9eae7f03c34 100644 --- a/plugin/semisync/semisync_master_plugin.cc +++ b/plugin/semisync/semisync_master_plugin.cc @@ -441,7 +441,7 @@ maria_declare_plugin(semisync_master) semi_sync_master_status_vars, /* status variables */ semi_sync_master_system_vars, /* system variables */ "1.0", - MariaDB_PLUGIN_MATURITY_UNKNOWN + MariaDB_PLUGIN_MATURITY_GAMMA } maria_declare_plugin_end; diff --git a/plugin/semisync/semisync_slave_plugin.cc b/plugin/semisync/semisync_slave_plugin.cc index b98888ecd32..572ead214de 100644 --- a/plugin/semisync/semisync_slave_plugin.cc +++ b/plugin/semisync/semisync_slave_plugin.cc @@ -227,7 +227,7 @@ maria_declare_plugin(semisync_slave) semi_sync_slave_status_vars, /* status variables */ semi_sync_slave_system_vars, /* system variables */ "1.0", - MariaDB_PLUGIN_MATURITY_UNKNOWN + MariaDB_PLUGIN_MATURITY_GAMMA } maria_declare_plugin_end; diff --git a/plugin/server_audit/server_audit.c b/plugin/server_audit/server_audit.c index 0b1ddbde6e4..fc1dbf4dde4 100644 --- a/plugin/server_audit/server_audit.c +++ b/plugin/server_audit/server_audit.c @@ -1653,7 +1653,7 @@ maria_declare_plugin(server_audit) audit_status, vars, PLUGIN_STR_VERSION, - MariaDB_PLUGIN_MATURITY_BETA + MariaDB_PLUGIN_MATURITY_STABLE } maria_declare_plugin_end; diff --git a/plugin/win_auth_client/CMakeLists.txt b/plugin/win_auth_client/CMakeLists.txt index a017410252d..75ee55117bd 100644 --- a/plugin/win_auth_client/CMakeLists.txt +++ b/plugin/win_auth_client/CMakeLists.txt @@ -31,7 +31,6 @@ IF(WIN32) LINK_LIBRARIES Secur32 MODULE_ONLY COMPONENT SharedLibraries) - #INSTALL_DEBUG_SYMBOLS(auth_win_client) #IF(MSVC) # INSTALL_DEBUG_TARGET(auth_win_client DESTINATION ${INSTALL_LIBDIR}/debug) #ENDIF() diff --git a/scripts/CMakeLists.txt b/scripts/CMakeLists.txt index 19d913bbe62..513aa0575c4 100644 --- a/scripts/CMakeLists.txt +++ b/scripts/CMakeLists.txt @@ -221,12 +221,17 @@ INSTALL_SCRIPT( ENDIF() SET(prefix 
"${CMAKE_INSTALL_PREFIX}") -SET(sysconfdir ${prefix}) +IF(INSTALL_SYSCONFDIR) + SET(sysconfdir ${DEFAULT_SYSCONFDIR}) +ELSE() + SET(sysconfdir "/etc") +ENDIF() SET(bindir ${prefix}/${INSTALL_BINDIR}) SET(libexecdir ${prefix}/${INSTALL_SBINDIR}) SET(scriptdir ${prefix}/${INSTALL_BINDIR}) SET(datadir ${prefix}/${INSTALL_MYSQLSHAREDIR}) SET(pkgdatadir ${prefix}/${INSTALL_MYSQLSHAREDIR}) +SET(libsubdir ${INSTALL_LIBDIR}) SET(pkgincludedir ${prefix}/${INSTALL_INCLUDEDIR}) SET(pkglibdir ${prefix}/${INSTALL_LIBDIR}) SET(pkgplugindir ${prefix}/${INSTALL_PLUGINDIR}) diff --git a/scripts/mysql_config.sh b/scripts/mysql_config.sh index beff6e5eb5f..4909d5d0a87 100644 --- a/scripts/mysql_config.sh +++ b/scripts/mysql_config.sh @@ -76,7 +76,8 @@ get_full_path () me=`get_full_path $0` -basedir=`echo $me | sed -e 's;/bin/mysql_config;;'` +# Script might have been renamed but assume mysql_<something>config<something> +basedir=`echo $me | sed -e 's;/bin/mysql_.*config.*;;'` ldata='@localstatedir@' execdir='@libexecdir@' @@ -85,11 +86,11 @@ bindir='@bindir@' # If installed, search for the compiled in directory first (might be "lib64") pkglibdir='@pkglibdir@' pkglibdir_rel=`echo $pkglibdir | sed -e "s;^$basedir/;;"` -fix_path pkglibdir $pkglibdir_rel lib64/mysql lib64 lib/mysql lib +fix_path pkglibdir $pkglibdir_rel @libsubdir@/mysql @libsubdir@ plugindir='@pkgplugindir@' plugindir_rel=`echo $plugindir | sed -e "s;^$basedir/;;"` -fix_path plugindir $plugindir_rel lib/mysql/plugin lib/plugin +fix_path plugindir $plugindir_rel @libsubdir@/mysql/plugin @libsubdir@/plugin pkgincludedir='@pkgincludedir@' fix_path pkgincludedir include/mysql @@ -176,10 +177,10 @@ Options: pkglibdir [$pkglibdir] plugindir [$plugindir] EOF - exit 0 + exit $1 } -if test $# -le 0; then usage; fi +if test $# -le 0; then usage 0 ; fi while test $# -gt 0; do case $1 in @@ -198,10 +199,10 @@ while test $# -gt 0; do pkgincludedir) echo "$pkgincludedir" ;; pkglibdir) echo "$pkglibdir" ;; plugindir) echo "$plugindir" 
;; - *) usage ;; + *) usage 1 >&2 ;; esac ;; - *) usage ;; + *) usage 1 >&2 ;; esac shift diff --git a/scripts/mysql_system_tables_fix.sql b/scripts/mysql_system_tables_fix.sql index dbfe1fa5c8c..49a9062e13e 100644 --- a/scripts/mysql_system_tables_fix.sql +++ b/scripts/mysql_system_tables_fix.sql @@ -648,6 +648,22 @@ DROP TABLE tmp_proxies_priv; # Convering the host name to lower case for existing users UPDATE user SET host=LOWER( host ) WHERE LOWER( host ) <> host; +# update timestamp fields in the innodb stat tables +set @str="alter table mysql.innodb_index_stats modify last_update timestamp not null default current_timestamp on update current_timestamp"; +set @str=if(@have_innodb <> 0, @str, "set @dummy = 0"); +prepare stmt from @str; +execute stmt; + +set @str=replace(@str, "innodb_index_stats", "innodb_table_stats"); +prepare stmt from @str; +execute stmt; + +SET @innodb_index_stats_fk= (select count(*) from information_schema.referential_constraints where constraint_schema='mysql' and table_name = 'innodb_index_stats' and referenced_table_name = 'innodb_table_stats' and constraint_name = 'innodb_index_stats_ibfk_1'); +SET @str=IF(@innodb_index_stats_fk > 0 and @have_innodb > 0, "ALTER TABLE mysql.innodb_index_stats DROP FOREIGN KEY `innodb_index_stats_ibfk_1`", "SET @dummy = 0"); +PREPARE stmt FROM @str; +EXECUTE stmt; +DROP PREPARE stmt; + # MDEV-4332 longer user names alter table user modify User char(80) binary not null default ''; alter table db modify User char(80) binary not null default ''; @@ -667,12 +683,5 @@ alter table tables_priv modify Grantor char(141) COLLATE utf8_bin not null # This should not be needed, but gives us some extra testing that the above # changes was correct -set @have_innodb= (select count(engine) from information_schema.engines where engine='INNODB' and support != 'NO'); -SET @innodb_index_stats_fk= (select count(*) from information_schema.referential_constraints where constraint_schema='mysql' and table_name = 
'innodb_index_stats' and referenced_table_name = 'innodb_table_stats' and constraint_name = 'innodb_index_stats_ibfk_1'); -SET @str=IF(@innodb_index_stats_fk > 0 and @have_innodb > 0, "ALTER TABLE mysql.innodb_index_stats DROP FOREIGN KEY `innodb_index_stats_ibfk_1`", "SET @dummy = 0"); -PREPARE stmt FROM @str; -EXECUTE stmt; -DROP PREPARE stmt; - flush privileges; diff --git a/scripts/mysqlaccess.conf b/scripts/mysqlaccess.conf index faf47da5f6c..faf47da5f6c 100755..100644 --- a/scripts/mysqlaccess.conf +++ b/scripts/mysqlaccess.conf diff --git a/scripts/mysqlaccess.sh b/scripts/mysqlaccess.sh index 1d01c84735a..d8fd239585c 100644 --- a/scripts/mysqlaccess.sh +++ b/scripts/mysqlaccess.sh @@ -261,12 +261,12 @@ Release Notes: * log-file for debug-output : /tmp/mysqlaccess.log * default values are read from a configuration file $script.conf first this file is looked for in the current directory; if not - found it is looked for in /etc/ + found it is looked for in @sysconfdir@ Note that when default-values are given, these can't get overriden by empty (blanc) values! * CGI-BIN version with HTML and forms interface. Simply place the script in an ScriptAliased directory, make the configuration file - available in the that directory or in /etc, and point your browser + available in the that directory or in @sysconfdir@, and point your browser to the right URL. * copy the grant-rules to temporary tables, where you are safe to play with them. 
@@ -480,12 +480,12 @@ MySQLaccess::Report::Print_Header(); if (-f "./$script_conf") { require "./$script_conf"; } + elsif (-f "@prefix@/$script_conf") { + require "@prefix@/$script_conf"; + } elsif (-f "@sysconfdir@/$script_conf") { require "@sysconfdir@/$script_conf"; } - elsif (-f "/etc/$script_conf") { - require "/etc/$script_conf"; - } # **************************** # Read in all parameters @@ -950,8 +950,8 @@ sub MergeConfigFile { # ================================= sub MergeConfigFiles { my ($name,$pass,$uid,$gid,$quota,$comment,$gcos,$dir,$shell) = getpwuid $<; + MergeConfigFile("@prefix@/my.cnf"); MergeConfigFile("@sysconfdir@/my.cnf"); - MergeConfigFile("/etc/my.cnf"); MergeConfigFile("$dir/.my.cnf"); } diff --git a/scripts/mysqld_multi.sh b/scripts/mysqld_multi.sh index 52135071ac5..56885137a4b 100644 --- a/scripts/mysqld_multi.sh +++ b/scripts/mysqld_multi.sh @@ -590,9 +590,9 @@ sub list_defaults_files my %seen; # Don't list the same file more than once return grep { defined $_ and not $seen{$_}++ and -f $_ and -r $_ } - ('/etc/my.cnf', - '/etc/mysql/my.cnf', - '@sysconfdir@/my.cnf', + ('@sysconfdir@/my.cnf', + '@sysconfdir@/mysql/my.cnf', + '@prefix@/my.cnf', ($ENV{MYSQL_HOME} ? "$ENV{MYSQL_HOME}/my.cnf" : undef), $opt{'extra-file'}, ($ENV{HOME} ? "$ENV{HOME}/.my.cnf" : undef)); @@ -725,7 +725,7 @@ sub example { print <<EOF; # This is an example of a my.cnf file for $my_progname. -# Usually this file is located in home dir ~/.my.cnf or /etc/my.cnf +# Usually this file is located in home dir ~/.my.cnf or @sysconfdir@/my.cnf # # SOME IMPORTANT NOTES FOLLOW: # @@ -798,7 +798,7 @@ sub example # (as per Linux/Unix standard). You may even replace the # /etc/init.d/mysql.server script with it. # -# Before using, you must create a my.cnf file either in @sysconfdir@/my.cnf +# Before using, you must create a my.cnf file either in @prefix@/my.cnf # or /root/.my.cnf and add the [mysqld_multi] and [mysqld#] groups. 
# # The script can be found from support-files/mysqld_multi.server.sh diff --git a/scripts/mysqld_safe.sh b/scripts/mysqld_safe.sh index e7eb2e72cbd..dc831434dbe 100644 --- a/scripts/mysqld_safe.sh +++ b/scripts/mysqld_safe.sh @@ -273,16 +273,6 @@ wsrep_recover_position() { } parse_arguments() { - # We only need to pass arguments through to the server if we don't - # handle them here. So, we collect unrecognized options (passed on - # the command line) into the args variable. - pick_args= - if test "$1" = PICK-ARGS-FROM-ARGV - then - pick_args=1 - shift - fi - for arg do val=`echo "$arg" | sed -e "s;--[^=]*=;;"` case "$arg" in @@ -340,11 +330,10 @@ parse_arguments() { --help) usage ;; *) - if test -n "$pick_args" - then - append_arg_to_args "$arg" - fi - ;; + case "$unrecognized_handling" in + collect) append_arg_to_args "$arg" ;; + complain) log_error "unknown option '$arg'" ;; + esac esac done } @@ -601,8 +590,16 @@ then SET_USER=0 fi +# If arguments come from [mysqld_safe] section of my.cnf +# we complain about unrecognized options +unrecognized_handling=complain parse_arguments `$print_defaults $defaults --loose-verbose mysqld_safe safe_mysqld mariadb_safe` -parse_arguments PICK-ARGS-FROM-ARGV "$@" + +# We only need to pass arguments through to the server if we don't +# handle them here. So, we collect unrecognized options (passed on +# the command line) into the args variable. 
+unrecognized_handling=collect +parse_arguments "$@" # diff --git a/scripts/mytop.sh b/scripts/mytop.sh index 480805e90b1..1cf1c5313b4 100644 --- a/scripts/mytop.sh +++ b/scripts/mytop.sh @@ -70,7 +70,6 @@ sub GetShowStatus(); sub cmd_s; sub cmd_S; sub cmd_q; -sub FindProg($); ## Default Config Values @@ -1366,9 +1365,9 @@ sub GetInnoDBStatus() { if (not $config{pager}) { - if (not $config{pager} = FindProg('less')) + if (not $config{pager} = my_which('less')) { - $config{pager} = FindProg('more'); + $config{pager} = my_which('more'); } } @@ -1467,9 +1466,9 @@ sub GetShowVariables() { if (not $config{pager}) { - if (not $config{pager} = FindProg('less')) + if (not $config{pager} = my_which('less')) { - $config{pager} = FindProg('more'); + $config{pager} = my_which('more'); } } @@ -1825,25 +1824,6 @@ sub Execute($) return $sth; } -sub FindProg($) -{ - my $prog = shift; - my $found = undef; - my @search_dirs = ("/bin", "/usr/bin", "/usr/sbin", - "/usr/local/bin", "/usr/local/sbin"); - - for (@search_dirs) - { - my $loc = "$_/$prog"; - if (-e $loc) - { - $found = $loc; - last; - } - } - return $found; -} - #### #### my_which is used, because we can't assume that every system has the #### which -command. my_which can take only one argument at a time. diff --git a/sql-bench/graph-compare-results.sh b/sql-bench/graph-compare-results.sh index ddc9080acd6..ddc9080acd6 100644..100755 --- a/sql-bench/graph-compare-results.sh +++ b/sql-bench/graph-compare-results.sh diff --git a/sql-common/client.c b/sql-common/client.c index 72760560903..c7065925198 100644 --- a/sql-common/client.c +++ b/sql-common/client.c @@ -764,8 +764,8 @@ cli_safe_read(MYSQL *mysql) restart: if (net->vio != 0) - len=my_net_read(net); - + len= my_net_read_packet(net, 0); + if (len == packet_error || len == 0) { DBUG_PRINT("error",("Wrong connection or packet. 
fd: %s len: %lu", diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt index d25ed87e1fa..e99c7dca01c 100644 --- a/sql/CMakeLists.txt +++ b/sql/CMakeLists.txt @@ -35,7 +35,15 @@ ${CMAKE_CURRENT_BINARY_DIR}/lex_hash.h SET_SOURCE_FILES_PROPERTIES(${GEN_SOURCES} PROPERTIES GENERATED 1) -ADD_DEFINITIONS(-DMYSQL_SERVER -DHAVE_EVENT_SCHEDULER -DHAVE_POOL_OF_THREADS) +ADD_DEFINITIONS(-DMYSQL_SERVER -DHAVE_EVENT_SCHEDULER) + +IF (CMAKE_SYSTEM_NAME MATCHES "Linux" OR + CMAKE_SYSTEM_NAME MATCHES "Windows" OR + CMAKE_SYSTEM_NAME MATCHES "SunOS" OR + HAVE_KQUEUE) + ADD_DEFINITIONS(-DHAVE_POOL_OF_THREADS) +ENDIF() + IF(SSL_DEFINES) ADD_DEFINITIONS(${SSL_DEFINES}) ENDIF() @@ -239,7 +247,9 @@ RUN_BISON( ) # Gen_lex_hash -ADD_EXECUTABLE(gen_lex_hash gen_lex_hash.cc) +IF(NOT CMAKE_CROSSCOMPILING) + ADD_EXECUTABLE(gen_lex_hash gen_lex_hash.cc) +ENDIF() ADD_CUSTOM_COMMAND( OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/lex_hash.h diff --git a/sql/create_options.cc b/sql/create_options.cc index d60639a4f4a..09153f7e35c 100644 --- a/sql/create_options.cc +++ b/sql/create_options.cc @@ -775,3 +775,20 @@ engine_option_value *merge_engine_table_options(engine_option_value *first, &first, &end); DBUG_RETURN(first); } + +bool is_engine_option_known(engine_option_value *opt, + ha_create_table_option *rules) +{ + if (!rules) + return false; + + for (; rules->name; rules++) + { + if (!my_strnncoll(system_charset_info, + (uchar*)rules->name, rules->name_length, + (uchar*)opt->name.str, opt->name.length)) + return true; + } + return false; +} + diff --git a/sql/create_options.h b/sql/create_options.h index d6b48822c49..eb21f291ff4 100644 --- a/sql/create_options.h +++ b/sql/create_options.h @@ -99,4 +99,6 @@ uchar *engine_table_options_frm_image(uchar *buff, bool engine_options_differ(void *old_struct, void *new_struct, ha_create_table_option *rules); +bool is_engine_option_known(engine_option_value *opt, + ha_create_table_option *rules); #endif diff --git a/sql/field.h b/sql/field.h index 
b5f332f5edc..06e7429c812 100644 --- a/sql/field.h +++ b/sql/field.h @@ -183,6 +183,29 @@ inline bool is_temporal_type(enum_field_types type) return mysql_type_to_time_type(type) != MYSQL_TIMESTAMP_ERROR; } + +/** + Tests if field type is temporal and has time part, + i.e. represents TIME, DATETIME or TIMESTAMP types in SQL. + + @param type Field type, as returned by field->type(). + @retval true If field type is temporal type with time part. + @retval false If field type is not temporal type with time part. +*/ +inline bool is_temporal_type_with_time(enum_field_types type) +{ + switch (type) + { + case MYSQL_TYPE_TIME: + case MYSQL_TYPE_DATETIME: + case MYSQL_TYPE_TIMESTAMP: + return true; + default: + return false; + } +} + + /* Virtual_column_info is the class to contain additional characteristics that is specific for a virtual/computed diff --git a/sql/filesort.cc b/sql/filesort.cc index 5ca6be2a2f4..23cfd6a1817 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -225,6 +225,8 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, table, num_rows, memory_available)) { DBUG_PRINT("info", ("filesort PQ is applicable")); + thd->query_plan_flags|= QPLAN_FILESORT_PRIORITY_QUEUE; + status_var_increment(thd->status_var.filesort_pq_sorts_); const size_t compare_length= param.sort_length; if (pq.init(param.max_rows, true, // max_at_top @@ -719,6 +721,9 @@ static ha_rows find_all_keys(Sort_param *param, SQL_SELECT *select, /* Temporary set for register_used_fields and register_field_in_read_map */ sort_form->read_set= &sort_form->tmp_set; register_used_fields(param); + if (quick_select) + select->quick->add_used_key_part_to_set(sort_form->read_set); + Item *sort_cond= !select ? 0 : !select->pre_idx_push_select_cond ? 
select->cond : select->pre_idx_push_select_cond; diff --git a/sql/handler.cc b/sql/handler.cc index b3d4bdf757c..8c0a2013094 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -591,7 +591,19 @@ int ha_initialize_handlerton(st_plugin_int *plugin) savepoint_alloc_size+= tmp; hton2plugin[hton->slot]=plugin; if (hton->prepare) + { total_ha_2pc++; + if (tc_log && tc_log != get_tc_log_implementation()) + { + total_ha_2pc--; + hton->prepare= 0; + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, + ER_UNKNOWN_ERROR, + "Cannot enable tc-log at run-time. " + "XA features of %s are disabled", + plugin->name.str); + } + } break; } /* fall through */ @@ -4763,11 +4775,13 @@ int ha_init_key_cache(const char *name, KEY_CACHE *key_cache, void *unused uint division_limit= (uint)key_cache->param_division_limit; uint age_threshold= (uint)key_cache->param_age_threshold; uint partitions= (uint)key_cache->param_partitions; + uint changed_blocks_hash_size= (uint)key_cache->changed_blocks_hash_size; mysql_mutex_unlock(&LOCK_global_system_variables); DBUG_RETURN(!init_key_cache(key_cache, tmp_block_size, tmp_buff_size, division_limit, age_threshold, + changed_blocks_hash_size, partitions)); } DBUG_RETURN(0); @@ -4788,10 +4802,12 @@ int ha_resize_key_cache(KEY_CACHE *key_cache) long tmp_block_size= (long) key_cache->param_block_size; uint division_limit= (uint)key_cache->param_division_limit; uint age_threshold= (uint)key_cache->param_age_threshold; + uint changed_blocks_hash_size= (uint)key_cache->changed_blocks_hash_size; mysql_mutex_unlock(&LOCK_global_system_variables); DBUG_RETURN(!resize_key_cache(key_cache, tmp_block_size, tmp_buff_size, - division_limit, age_threshold)); + division_limit, age_threshold, + changed_blocks_hash_size)); } DBUG_RETURN(0); } @@ -4831,10 +4847,12 @@ int ha_repartition_key_cache(KEY_CACHE *key_cache) uint division_limit= (uint)key_cache->param_division_limit; uint age_threshold= (uint)key_cache->param_age_threshold; uint partitions= 
(uint)key_cache->param_partitions; + uint changed_blocks_hash_size= (uint)key_cache->changed_blocks_hash_size; mysql_mutex_unlock(&LOCK_global_system_variables); DBUG_RETURN(!repartition_key_cache(key_cache, tmp_block_size, tmp_buff_size, division_limit, age_threshold, + changed_blocks_hash_size, partitions)); } DBUG_RETURN(0); diff --git a/sql/item.cc b/sql/item.cc index 21baf779781..107468030bb 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -2124,7 +2124,7 @@ bool agg_item_collations(DTCollation &c, const char *fname, bool unknown_cs= 0; c.set(av[0]->collation); - for (i= 1, arg= &av[item_sep]; i < count; i++, arg++) + for (i= 1, arg= &av[item_sep]; i < count; i++, arg+= item_sep) { if (c.aggregate((*arg)->collation, flags)) { @@ -6389,8 +6389,6 @@ int Item_hex_hybrid::save_in_field(Field *field, bool no_conversions) ulonglong nr; uint32 length= str_value.length(); - if (!length) - return 1; if (length > 8) { diff --git a/sql/item.h b/sql/item.h index 29e727b8d5f..00c1468bb48 100644 --- a/sql/item.h +++ b/sql/item.h @@ -1034,9 +1034,47 @@ public: virtual cond_result eq_cmp_result() const { return COND_OK; } inline uint float_length(uint decimals_par) const { return decimals != NOT_FIXED_DEC ? (DBL_DIG+2+decimals_par) : DBL_DIG+8;} + /* Returns total number of decimal digits */ virtual uint decimal_precision() const; + /* Returns the number of integer part digits only */ inline int decimal_int_part() const { return my_decimal_int_part(decimal_precision(), decimals); } + /* + Returns the number of fractional digits only. + NOT_FIXED_DEC is replaced to the maximum possible number + of fractional digits, taking into account the data type. + */ + uint decimal_scale() const + { + return decimals < NOT_FIXED_DEC ? decimals : + is_temporal_type_with_time(field_type()) ? + TIME_SECOND_PART_DIGITS : + MY_MIN(max_length, DECIMAL_MAX_SCALE); + } + /* + Returns how many digits a divisor adds into a division result. 
+ This is important when the integer part of the divisor can be 0. + In this example: + SELECT 1 / 0.000001; -> 1000000.0000 + the divisor adds 5 digits into the result precision. + + Currently this method only replaces NOT_FIXED_DEC to + TIME_SECOND_PART_DIGITS for temporal data types. + This method can be made virtual, to create more efficient (smaller) + data types for division results. + For example, in + SELECT 1/1.000001; + the divisor could provide no additional precision into the result, + so could any other items that are know to return a result + with non-zero integer part. + */ + uint divisor_precision_increment() const + { + return decimals < NOT_FIXED_DEC ? decimals : + is_temporal_type_with_time(field_type()) ? + TIME_SECOND_PART_DIGITS : + decimals; + } /** TIME or DATETIME precision of the item: 0..6 */ diff --git a/sql/item_create.cc b/sql/item_create.cc index 193c7deb207..532654910e2 100644 --- a/sql/item_create.cc +++ b/sql/item_create.cc @@ -1264,6 +1264,21 @@ protected: }; +#if defined(HAVE_SPATIAL) && !defined(DBUG_OFF) +class Create_func_gis_debug : public Create_func_arg1 +{ + public: + virtual Item *create_1_arg(THD *thd, Item *arg1); + + static Create_func_gis_debug s_singleton; + + protected: + Create_func_gis_debug() {} + virtual ~Create_func_gis_debug() {} +}; +#endif + + #ifdef HAVE_SPATIAL class Create_func_glength : public Create_func_arg1 { @@ -4159,6 +4174,17 @@ Create_func_get_lock::create_2_arg(THD *thd, Item *arg1, Item *arg2) } +#if defined(HAVE_SPATIAL) && !defined(DBUG_OFF) +Create_func_gis_debug Create_func_gis_debug::s_singleton; + +Item* +Create_func_gis_debug::create_1_arg(THD *thd, Item *arg1) +{ + return new (thd->mem_root) Item_func_gis_debug(arg1); +} +#endif + + #ifdef HAVE_SPATIAL Create_func_glength Create_func_glength::s_singleton; @@ -5854,6 +5880,9 @@ static Native_func_registry func_array[] = { { C_STRING_WITH_LEN("ST_GEOMETRYTYPE") }, GEOM_BUILDER(Create_func_geometry_type)}, { { 
C_STRING_WITH_LEN("ST_GEOMFROMTEXT") }, GEOM_BUILDER(Create_func_geometry_from_text)}, { { C_STRING_WITH_LEN("ST_GEOMFROMWKB") }, GEOM_BUILDER(Create_func_geometry_from_wkb)}, +#ifndef DBUG_OFF + { { C_STRING_WITH_LEN("ST_GIS_DEBUG") }, GEOM_BUILDER(Create_func_gis_debug)}, +#endif { { C_STRING_WITH_LEN("ST_EQUALS") }, GEOM_BUILDER(Create_func_equals)}, { { C_STRING_WITH_LEN("ST_INTERIORRINGN") }, GEOM_BUILDER(Create_func_interiorringn)}, { { C_STRING_WITH_LEN("ST_INTERSECTS") }, GEOM_BUILDER(Create_func_intersects)}, diff --git a/sql/item_func.cc b/sql/item_func.cc index 11110dddeb8..6e715192a22 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -593,7 +593,7 @@ my_decimal *Item_real_func::val_decimal(my_decimal *decimal_value) } -void Item_func::fix_num_length_and_dec() +void Item_udf_func::fix_num_length_and_dec() { uint fl_length= 0; decimals=0; @@ -611,11 +611,6 @@ void Item_func::fix_num_length_and_dec() } -void Item_func_numhybrid::fix_num_length_and_dec() -{} - - - /** Count max_length and decimals for temporal functions. @@ -803,9 +798,9 @@ bool Item_func_connection_id::fix_fields(THD *thd, Item **ref) function of two arguments. 
*/ -void Item_num_op::find_num_type(void) +void Item_num_op::fix_length_and_dec(void) { - DBUG_ENTER("Item_num_op::find_num_type"); + DBUG_ENTER("Item_num_op::fix_length_and_dec"); DBUG_PRINT("info", ("name %s", func_name())); DBUG_ASSERT(arg_count == 2); Item_result r0= args[0]->cast_to_int_type(); @@ -849,22 +844,26 @@ void Item_num_op::find_num_type(void) type depends only on the first argument) */ -void Item_func_num1::find_num_type() +void Item_func_num1::fix_length_and_dec() { - DBUG_ENTER("Item_func_num1::find_num_type"); + DBUG_ENTER("Item_func_num1::fix_length_and_dec"); DBUG_PRINT("info", ("name %s", func_name())); switch (cached_result_type= args[0]->cast_to_int_type()) { case INT_RESULT: + max_length= args[0]->max_length; unsigned_flag= args[0]->unsigned_flag; break; case STRING_RESULT: case REAL_RESULT: cached_result_type= REAL_RESULT; + decimals= args[0]->decimals; // Preserve NOT_FIXED_DEC max_length= float_length(decimals); break; case TIME_RESULT: cached_result_type= DECIMAL_RESULT; case DECIMAL_RESULT: + decimals= args[0]->decimal_scale(); // Do not preserve NOT_FIXED_DEC + max_length= args[0]->max_length; break; case ROW_RESULT: case IMPOSSIBLE_RESULT: @@ -879,20 +878,6 @@ void Item_func_num1::find_num_type() } -void Item_func_num1::fix_num_length_and_dec() -{ - decimals= args[0]->decimals; - max_length= args[0]->max_length; -} - - -void Item_func_numhybrid::fix_length_and_dec() -{ - fix_num_length_and_dec(); - find_num_type(); -} - - String *Item_func_hybrid_result_type::val_str(String *str) { DBUG_ASSERT(fixed == 1); @@ -1537,11 +1522,14 @@ my_decimal *Item_func_plus::decimal_op(my_decimal *decimal_value) */ void Item_func_additive_op::result_precision() { - decimals= MY_MAX(args[0]->decimals, args[1]->decimals); - int arg1_int= args[0]->decimal_precision() - args[0]->decimals; - int arg2_int= args[1]->decimal_precision() - args[1]->decimals; + decimals= MY_MAX(args[0]->decimal_scale(), args[1]->decimal_scale()); + int arg1_int= 
args[0]->decimal_precision() - args[0]->decimal_scale(); + int arg2_int= args[1]->decimal_precision() - args[1]->decimal_scale(); int precision= MY_MAX(arg1_int, arg2_int) + 1 + decimals; + DBUG_ASSERT(arg1_int >= 0); + DBUG_ASSERT(arg2_int >= 0); + /* Integer operations keep unsigned_flag if one of arguments is unsigned */ if (result_type() == INT_RESULT) unsigned_flag= args[0]->unsigned_flag | args[1]->unsigned_flag; @@ -1778,7 +1766,8 @@ void Item_func_mul::result_precision() unsigned_flag= args[0]->unsigned_flag | args[1]->unsigned_flag; else unsigned_flag= args[0]->unsigned_flag & args[1]->unsigned_flag; - decimals= MY_MIN(args[0]->decimals + args[1]->decimals, DECIMAL_MAX_SCALE); + decimals= MY_MIN(args[0]->decimal_scale() + args[1]->decimal_scale(), + DECIMAL_MAX_SCALE); uint est_prec = args[0]->decimal_precision() + args[1]->decimal_precision(); uint precision= MY_MIN(est_prec, DECIMAL_MAX_PRECISION); max_length= my_decimal_precision_to_length_no_truncation(precision, decimals, @@ -1832,8 +1821,20 @@ my_decimal *Item_func_div::decimal_op(my_decimal *decimal_value) void Item_func_div::result_precision() { + /* + We need to add args[1]->divisor_precision_increment(), + to properly handle the cases like this: + SELECT 5.05 / 0.014; -> 360.714286 + i.e. when the divisor has a zero integer part + and non-zero digits appear only after the decimal point. + Precision in this example is calculated as + args[0]->decimal_precision() + // 3 + args[1]->divisor_precision_increment() + // 3 + prec_increment // 4 + which gives 10 decimals digits. 
+ */ uint precision=MY_MIN(args[0]->decimal_precision() + - args[1]->decimals + prec_increment, + args[1]->divisor_precision_increment() + prec_increment, DECIMAL_MAX_PRECISION); /* Integer operations keep unsigned_flag if one of arguments is unsigned */ @@ -1841,7 +1842,7 @@ void Item_func_div::result_precision() unsigned_flag= args[0]->unsigned_flag | args[1]->unsigned_flag; else unsigned_flag= args[0]->unsigned_flag & args[1]->unsigned_flag; - decimals= MY_MIN(args[0]->decimals + prec_increment, DECIMAL_MAX_SCALE); + decimals= MY_MIN(args[0]->decimal_scale() + prec_increment, DECIMAL_MAX_SCALE); max_length= my_decimal_precision_to_length_no_truncation(precision, decimals, unsigned_flag); } @@ -2047,7 +2048,7 @@ my_decimal *Item_func_mod::decimal_op(my_decimal *decimal_value) void Item_func_mod::result_precision() { - decimals= MY_MAX(args[0]->decimals, args[1]->decimals); + decimals= MY_MAX(args[0]->decimal_scale(), args[1]->decimal_scale()); max_length= MY_MAX(args[0]->max_length, args[1]->max_length); } @@ -2103,18 +2104,12 @@ my_decimal *Item_func_neg::decimal_op(my_decimal *decimal_value) } -void Item_func_neg::fix_num_length_and_dec() -{ - decimals= args[0]->decimals; - /* 1 add because sign can appear */ - max_length= args[0]->max_length + 1; -} - - void Item_func_neg::fix_length_and_dec() { DBUG_ENTER("Item_func_neg::fix_length_and_dec"); Item_func_num1::fix_length_and_dec(); + /* 1 add because sign can appear */ + max_length= args[0]->max_length + 1; /* If this is in integer context keep the context as integer if possible @@ -2421,8 +2416,12 @@ void Item_func_integer::fix_length_and_dec() decimals=0; } -void Item_func_int_val::fix_num_length_and_dec() + +void Item_func_int_val::fix_length_and_dec() { + DBUG_ENTER("Item_func_int_val::fix_length_and_dec"); + DBUG_PRINT("info", ("name %s", func_name())); + ulonglong tmp_max_length= (ulonglong ) args[0]->max_length - (args[0]->decimals ? 
args[0]->decimals + 1 : 0) + 2; max_length= tmp_max_length > (ulonglong) 4294967295U ? @@ -2430,13 +2429,7 @@ void Item_func_int_val::fix_num_length_and_dec() uint tmp= float_length(decimals); set_if_smaller(max_length,tmp); decimals= 0; -} - -void Item_func_int_val::find_num_type() -{ - DBUG_ENTER("Item_func_int_val::find_num_type"); - DBUG_PRINT("info", ("name %s", func_name())); switch (cached_result_type= args[0]->cast_to_int_type()) { case STRING_RESULT: @@ -2972,7 +2965,7 @@ bool Item_func_min_max::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date) { ltime->time_type= MYSQL_TIMESTAMP_TIME; ltime->hour+= (ltime->month * 32 + ltime->day) * 24; - ltime->month= ltime->day= 0; + ltime->year= ltime->month= ltime->day= 0; if (adjust_time_range_with_warn(ltime, std::min<uint>(decimals, TIME_SECOND_PART_DIGITS))) return (null_value= true); @@ -3906,12 +3899,6 @@ String *Item_func_udf_decimal::val_str(String *str) } -void Item_func_udf_decimal::fix_length_and_dec() -{ - fix_num_length_and_dec(); -} - - /* Default max_length is max argument length */ void Item_func_udf_str::fix_length_and_dec() diff --git a/sql/item_func.h b/sql/item_func.h index 1696898812d..18265f672dd 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -154,7 +154,6 @@ public: virtual void print(String *str, enum_query_type query_type); void print_op(String *str, enum_query_type query_type); void print_args(String *str, uint from, enum_query_type query_type); - virtual void fix_num_length_and_dec(); void count_only_length(Item **item, uint nitems); void count_real_length(); void count_decimal_length(); @@ -541,9 +540,6 @@ public: Item_func_numhybrid(List<Item> &list) :Item_func_hybrid_result_type(list) { } - void fix_length_and_dec(); - void fix_num_length_and_dec(); - virtual void find_num_type()= 0; /* To be called from fix_length_and_dec */ String *str_op(String *str) { DBUG_ASSERT(0); return 0; } bool date_op(MYSQL_TIME *ltime, uint fuzzydate) { DBUG_ASSERT(0); return true; } }; @@ -555,9 +551,7 
@@ class Item_func_num1: public Item_func_numhybrid public: Item_func_num1(Item *a) :Item_func_numhybrid(a) {} Item_func_num1(Item *a, Item *b) :Item_func_numhybrid(a, b) {} - - void fix_num_length_and_dec(); - void find_num_type(); + void fix_length_and_dec(); }; @@ -573,7 +567,7 @@ class Item_num_op :public Item_func_numhybrid print_op(str, query_type); } - void find_num_type(); + void fix_length_and_dec(); }; @@ -795,7 +789,6 @@ public: const char *func_name() const { return "-"; } enum Functype functype() const { return NEG_FUNC; } void fix_length_and_dec(); - void fix_num_length_and_dec(); uint decimal_precision() const { return args[0]->decimal_precision(); } bool check_partition_func_processor(uchar *int_arg) {return FALSE;} bool check_vcol_func_processor(uchar *int_arg) { return FALSE;} @@ -962,8 +955,7 @@ class Item_func_int_val :public Item_func_num1 { public: Item_func_int_val(Item *a) :Item_func_num1(a) {} - void fix_num_length_and_dec(); - void find_num_type(); + void fix_length_and_dec(); }; @@ -1376,6 +1368,7 @@ public: fixed= 1; return res; } + void fix_num_length_and_dec(); void update_used_tables() { /* @@ -1489,7 +1482,7 @@ public: my_decimal *val_decimal(my_decimal *); String *val_str(String *str); enum Item_result result_type () const { return DECIMAL_RESULT; } - void fix_length_and_dec(); + void fix_length_and_dec() { fix_num_length_and_dec(); } }; diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc index 1deda83907c..d9200b3e8d3 100644 --- a/sql/item_geofunc.cc +++ b/sql/item_geofunc.cc @@ -1739,4 +1739,12 @@ mem_error: } +#ifndef DBUG_OFF +longlong Item_func_gis_debug::val_int() +{ + /* For now this is just a stub. 
TODO: implement the internal GIS debuggign */ + return 0; +} +#endif + #endif /*HAVE_SPATIAL*/ diff --git a/sql/item_geofunc.h b/sql/item_geofunc.h index 2d715dc8765..6d52661e5c9 100644 --- a/sql/item_geofunc.h +++ b/sql/item_geofunc.h @@ -496,6 +496,18 @@ public: const char *func_name() const { return "st_distance"; } }; + +#ifndef DBUG_OFF +class Item_func_gis_debug: public Item_int_func +{ + public: + Item_func_gis_debug(Item *a) :Item_int_func(a) { null_value= false; } + const char *func_name() const { return "st_gis_debug"; } + longlong val_int(); +}; +#endif + + #define GEOM_NEW(thd, obj_constructor) new (thd->mem_root) obj_constructor #else /*HAVE_SPATIAL*/ diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index ec6ab0f3040..bb999f132c4 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -180,16 +180,27 @@ String *Item_func_md5::val_str_ascii(String *str) } +/* + The MD5()/SHA() functions treat their parameter as being a case sensitive. + Thus we set binary collation on it so different instances of MD5() will be + compared properly. +*/ +static CHARSET_INFO *get_checksum_charset(const char *csname) +{ + CHARSET_INFO *cs= get_charset_by_csname(csname, MY_CS_BINSORT, MYF(0)); + if (!cs) + { + // Charset has no binary collation: use my_charset_bin. + cs= &my_charset_bin; + } + return cs; +} + + void Item_func_md5::fix_length_and_dec() { - /* - The MD5() function treats its parameter as being a case sensitive. Thus - we set binary collation on it so different instances of MD5() will be - compared properly. 
- */ - args[0]->collation.set( - get_charset_by_csname(args[0]->collation.collation->csname, - MY_CS_BINSORT,MYF(0)), DERIVATION_COERCIBLE); + CHARSET_INFO *cs= get_checksum_charset(args[0]->collation.collation->csname); + args[0]->collation.set(cs, DERIVATION_COERCIBLE); fix_length_and_charset(32, default_charset()); } @@ -219,14 +230,8 @@ String *Item_func_sha::val_str_ascii(String *str) void Item_func_sha::fix_length_and_dec() { - /* - The SHA() function treats its parameter as being a case sensitive. Thus - we set binary collation on it so different instances of MD5() will be - compared properly. - */ - args[0]->collation.set( - get_charset_by_csname(args[0]->collation.collation->csname, - MY_CS_BINSORT,MYF(0)), DERIVATION_COERCIBLE); + CHARSET_INFO *cs= get_checksum_charset(args[0]->collation.collation->csname); + args[0]->collation.set(cs, DERIVATION_COERCIBLE); // size of hex representation of hash fix_length_and_charset(SHA1_HASH_SIZE * 2, default_charset()); } @@ -349,18 +354,9 @@ void Item_func_sha2::fix_length_and_dec() ER(ER_WRONG_PARAMETERS_TO_NATIVE_FCT), "sha2"); } - /* - The SHA2() function treats its parameter as being a case sensitive. - Thus we set binary collation on it so different instances of SHA2() - will be compared properly. 
- */ + CHARSET_INFO *cs= get_checksum_charset(args[0]->collation.collation->csname); + args[0]->collation.set(cs, DERIVATION_COERCIBLE); - args[0]->collation.set( - get_charset_by_csname( - args[0]->collation.collation->csname, - MY_CS_BINSORT, - MYF(0)), - DERIVATION_COERCIBLE); #else push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, @@ -514,39 +510,42 @@ void Item_func_from_base64::fix_length_and_dec() String *Item_func_from_base64::val_str(String *str) { String *res= args[0]->val_str_ascii(str); - bool too_long= false; int length; const char *end_ptr; - if (!res || - res->length() > (uint) base64_decode_max_arg_length() || - (too_long= - ((uint) (length= base64_needed_decoded_length((int) res->length())) > - current_thd->variables.max_allowed_packet)) || - tmp_value.alloc((uint) length) || - (length= base64_decode(res->ptr(), (int) res->length(), + if (!res) + goto err; + + if (res->length() > (uint) base64_decode_max_arg_length() || + ((uint) (length= base64_needed_decoded_length((int) res->length())) > + current_thd->variables.max_allowed_packet)) + { + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, + ER_WARN_ALLOWED_PACKET_OVERFLOWED, + ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), func_name(), + current_thd->variables.max_allowed_packet); + goto err; + } + + if (tmp_value.alloc((uint) length)) + goto err; + + if ((length= base64_decode(res->ptr(), (int) res->length(), (char *) tmp_value.ptr(), &end_ptr, 0)) < 0 || end_ptr < res->ptr() + res->length()) { - null_value= 1; // NULL input, too long input, OOM, or badly formed input - if (too_long) - { - push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, - ER_WARN_ALLOWED_PACKET_OVERFLOWED, - ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), func_name(), - current_thd->variables.max_allowed_packet); - } - else if (res && length < 0) - { - push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, - ER_BAD_BASE64_DATA, ER(ER_BAD_BASE64_DATA), - end_ptr - res->ptr()); - } - return 
0; + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, + ER_BAD_BASE64_DATA, ER(ER_BAD_BASE64_DATA), + end_ptr - res->ptr()); + goto err; } + tmp_value.length((uint) length); null_value= 0; return &tmp_value; +err: + null_value= 1; // NULL input, too long input, OOM, or badly formed input + return 0; } /////////////////////////////////////////////////////////////////////////////// @@ -599,7 +598,7 @@ String *Item_func_decode_histogram::val_str(String *str) val= p[i] / ((double)((1 << 8) - 1)); break; case DOUBLE_PREC_HB: - val= ((uint16 *)(p + i))[0] / ((double)((1 << 16) - 1)); + val= uint2korr(p + i) / ((double)((1 << 16) - 1)); i++; break; default: @@ -1963,7 +1962,7 @@ String *Item_func_ltrim::val_str(String *str) if ((remove_length= remove_str->length()) == 0 || remove_length > res->length()) - return res; + return non_trimmed_value(res); ptr= (char*) res->ptr(); end= ptr+res->length(); @@ -1982,9 +1981,8 @@ String *Item_func_ltrim::val_str(String *str) end+=remove_length; } if (ptr == res->ptr()) - return res; - tmp_value.set(*res,(uint) (ptr - res->ptr()),(uint) (end-ptr)); - return &tmp_value; + return non_trimmed_value(res); + return trimmed_value(res, (uint32) (ptr - res->ptr()), (uint32) (end - ptr)); } @@ -2010,7 +2008,7 @@ String *Item_func_rtrim::val_str(String *str) if ((remove_length= remove_str->length()) == 0 || remove_length > res->length()) - return res; + return non_trimmed_value(res); ptr= (char*) res->ptr(); end= ptr+res->length(); @@ -2022,11 +2020,11 @@ String *Item_func_rtrim::val_str(String *str) { char chr=(*remove_str)[0]; #ifdef USE_MB - if (use_mb(res->charset())) + if (use_mb(collation.collation)) { while (ptr < end) { - if ((l=my_ismbchar(res->charset(), ptr,end))) ptr+=l,p=ptr; + if ((l= my_ismbchar(collation.collation, ptr, end))) ptr+= l, p=ptr; else ++ptr; } ptr=p; @@ -2039,12 +2037,12 @@ String *Item_func_rtrim::val_str(String *str) { const char *r_ptr=remove_str->ptr(); #ifdef USE_MB - if 
(use_mb(res->charset())) + if (use_mb(collation.collation)) { loop: while (ptr + remove_length < end) { - if ((l=my_ismbchar(res->charset(), ptr,end))) ptr+=l; + if ((l= my_ismbchar(collation.collation, ptr, end))) ptr+= l; else ++ptr; } if (ptr + remove_length == end && !memcmp(ptr,r_ptr,remove_length)) @@ -2063,9 +2061,8 @@ String *Item_func_rtrim::val_str(String *str) } } if (end == res->ptr()+res->length()) - return res; - tmp_value.set(*res,0,(uint) (end-res->ptr())); - return &tmp_value; + return non_trimmed_value(res); + return trimmed_value(res, 0, (uint32) (end - res->ptr())); } @@ -2092,37 +2089,22 @@ String *Item_func_trim::val_str(String *str) if ((remove_length= remove_str->length()) == 0 || remove_length > res->length()) - return res; + return non_trimmed_value(res); ptr= (char*) res->ptr(); end= ptr+res->length(); r_ptr= remove_str->ptr(); + while (ptr+remove_length <= end && !memcmp(ptr,r_ptr,remove_length)) + ptr+=remove_length; #ifdef USE_MB - if (use_mb(res->charset())) + if (use_mb(collation.collation)) { - while (ptr + remove_length <= end) - { - uint num_bytes= 0; - while (num_bytes < remove_length) - { - uint len; - if ((len= my_ismbchar(res->charset(), ptr + num_bytes, end))) - num_bytes+= len; - else - ++num_bytes; - } - if (num_bytes != remove_length) - break; - if (memcmp(ptr, r_ptr, remove_length)) - break; - ptr+= remove_length; - } char *p=ptr; register uint32 l; loop: while (ptr + remove_length < end) { - if ((l= my_ismbchar(res->charset(), ptr,end))) + if ((l= my_ismbchar(collation.collation, ptr, end))) ptr+= l; else ++ptr; @@ -2138,16 +2120,13 @@ String *Item_func_trim::val_str(String *str) else #endif /* USE_MB */ { - while (ptr+remove_length <= end && !memcmp(ptr,r_ptr,remove_length)) - ptr+=remove_length; while (ptr + remove_length <= end && !memcmp(end-remove_length,r_ptr,remove_length)) end-=remove_length; } if (ptr == res->ptr() && end == ptr+res->length()) - return res; - tmp_value.set(*res,(uint) (ptr - res->ptr()),(uint) 
(end-ptr)); - return &tmp_value; + return non_trimmed_value(res); + return trimmed_value(res, (uint32) (ptr - res->ptr()), (uint32) (end - ptr)); } void Item_func_trim::fix_length_and_dec() diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h index f3d5c064423..4551cc1ab46 100644 --- a/sql/item_strfunc.h +++ b/sql/item_strfunc.h @@ -349,6 +349,21 @@ class Item_func_trim :public Item_str_func protected: String tmp_value; String remove; + String *trimmed_value(String *res, uint32 offset, uint32 length) + { + tmp_value.set(*res, offset, length); + /* + Make sure to return correct charset and collation: + TRIM(0x000000 FROM _ucs2 0x0061) + should set charset to "binary" rather than to "ucs2". + */ + tmp_value.set_charset(collation.collation); + return &tmp_value; + } + String *non_trimmed_value(String *res) + { + return trimmed_value(res, 0, res->length()); + } public: Item_func_trim(Item *a,Item *b) :Item_str_func(a,b) {} Item_func_trim(Item *a) :Item_str_func(a) {} diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 38bb3121ed8..7db7b014d28 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -3654,8 +3654,9 @@ int subselect_single_select_engine::exec() pushed down into the subquery. Those optimizations are ref[_or_null] acceses. Change them to be full table scans. 
*/ - for (JOIN_TAB *tab= first_linear_tab(join, WITHOUT_CONST_TABLES); tab; - tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS)) + JOIN_TAB *tab; + for (tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES); + tab; tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS)) { if (tab && tab->keyuse) { diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h index 29badddad8e..cb8b59501a4 100644 --- a/sql/item_timefunc.h +++ b/sql/item_timefunc.h @@ -413,16 +413,15 @@ protected: public: Item_func_seconds_hybrid() :Item_func_numhybrid() {} Item_func_seconds_hybrid(Item *a) :Item_func_numhybrid(a) {} - void fix_num_length_and_dec() + void fix_length_and_dec() { if (arg_count) decimals= args[0]->temporal_precision(arg0_expected_type()); set_if_smaller(decimals, TIME_SECOND_PART_DIGITS); max_length=17 + (decimals ? decimals + 1 : 0); maybe_null= true; + cached_result_type= decimals ? DECIMAL_RESULT : INT_RESULT; } - void find_num_type() - { cached_result_type= decimals ? DECIMAL_RESULT : INT_RESULT; } double real_op() { DBUG_ASSERT(0); return 0; } String *str_op(String *str) { DBUG_ASSERT(0); return 0; } bool date_op(MYSQL_TIME *ltime, uint fuzzydate) { DBUG_ASSERT(0); return true; } @@ -470,11 +469,6 @@ protected: public: Item_func_time_to_sec(Item *item) :Item_func_seconds_hybrid(item) {} const char *func_name() const { return "time_to_sec"; } - void fix_num_length_and_dec() - { - maybe_null= true; - Item_func_seconds_hybrid::fix_num_length_and_dec(); - } bool check_partition_func_processor(uchar *int_arg) {return FALSE;} bool check_vcol_func_processor(uchar *int_arg) { return FALSE;} bool check_valid_arguments_processor(uchar *int_arg) diff --git a/sql/log.cc b/sql/log.cc index 0da411d42e2..79771ab8950 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -2538,7 +2538,7 @@ static int find_uniq_filename(char *name) file_info= dir_info->dir_entry; for (i= dir_info->number_of_files ; i-- ; file_info++) { - if (memcmp(file_info->name, start, length) == 0 && + if 
(strncmp(file_info->name, start, length) == 0 && test_if_number(file_info->name+length, &number,0)) { set_if_bigger(max_found,(ulong) number); @@ -2815,11 +2815,13 @@ int MYSQL_LOG::generate_new_name(char *new_name, const char *log_name) { if (!fn_ext(log_name)[0]) { - if (find_uniq_filename(new_name)) + if (DBUG_EVALUATE_IF("binlog_inject_new_name_error", TRUE, FALSE) || + find_uniq_filename(new_name)) { - my_printf_error(ER_NO_UNIQUE_LOGFILE, ER(ER_NO_UNIQUE_LOGFILE), - MYF(ME_FATALERROR), log_name); - sql_print_error(ER(ER_NO_UNIQUE_LOGFILE), log_name); + if (current_thd) + my_printf_error(ER_NO_UNIQUE_LOGFILE, ER(ER_NO_UNIQUE_LOGFILE), + MYF(ME_FATALERROR), log_name); + sql_print_error(ER_DEFAULT(ER_NO_UNIQUE_LOGFILE), log_name); return 1; } } @@ -3072,7 +3074,8 @@ bool MYSQL_QUERY_LOG::write(THD *thd, time_t current_time, my_b_printf(&log_file, "# Full_scan: %s Full_join: %s " "Tmp_table: %s Tmp_table_on_disk: %s\n" - "# Filesort: %s Filesort_on_disk: %s Merge_passes: %lu\n", + "# Filesort: %s Filesort_on_disk: %s Merge_passes: %lu " + "Priority_queue: %s\n", ((thd->query_plan_flags & QPLAN_FULL_SCAN) ? "Yes" : "No"), ((thd->query_plan_flags & QPLAN_FULL_JOIN) ? "Yes" : "No"), ((thd->query_plan_flags & QPLAN_TMP_TABLE) ? "Yes" : "No"), @@ -3080,7 +3083,10 @@ bool MYSQL_QUERY_LOG::write(THD *thd, time_t current_time, ((thd->query_plan_flags & QPLAN_FILESORT) ? "Yes" : "No"), ((thd->query_plan_flags & QPLAN_FILESORT_DISK) ? "Yes" : "No"), - thd->query_plan_fsort_passes) == (size_t) -1) + thd->query_plan_fsort_passes, + ((thd->query_plan_flags & QPLAN_FILESORT_PRIORITY_QUEUE) ? 
+ "Yes" : "No") + ) == (size_t) -1) tmp_errno= errno; if (thd->variables.log_slow_verbosity & LOG_SLOW_VERBOSITY_EXPLAIN && thd->lex->explain) @@ -4265,6 +4271,7 @@ int MYSQL_BIN_LOG::purge_first_log(Relay_log_info* rli, bool included) { int error; char *to_purge_if_included= NULL; + inuse_relaylog *ir; DBUG_ENTER("purge_first_log"); DBUG_ASSERT(is_open()); @@ -4272,7 +4279,30 @@ int MYSQL_BIN_LOG::purge_first_log(Relay_log_info* rli, bool included) DBUG_ASSERT(!strcmp(rli->linfo.log_file_name,rli->event_relay_log_name)); mysql_mutex_lock(&LOCK_index); - to_purge_if_included= my_strdup(rli->group_relay_log_name, MYF(0)); + + ir= rli->inuse_relaylog_list; + while (ir) + { + inuse_relaylog *next= ir->next; + if (!ir->completed || ir->dequeued_count < ir->queued_count) + { + included= false; + break; + } + if (!included && !strcmp(ir->name, rli->group_relay_log_name)) + break; + if (!next) + { + rli->last_inuse_relaylog= NULL; + included= 1; + to_purge_if_included= my_strdup(ir->name, MYF(0)); + } + my_free(ir); + ir= next; + } + rli->inuse_relaylog_list= ir; + if (ir) + to_purge_if_included= my_strdup(ir->name, MYF(0)); /* Read the next log file name from the index file and pass it back to @@ -7009,7 +7039,7 @@ MYSQL_BIN_LOG::queue_for_group_commit(group_commit_entry *orig_entry) /* Interrupted by kill. 
*/ DEBUG_SYNC(orig_entry->thd, "group_commit_waiting_for_prior_killed"); wfc->wakeup_error= orig_entry->thd->killed_errno(); - if (wfc->wakeup_error) + if (!wfc->wakeup_error) wfc->wakeup_error= ER_QUERY_INTERRUPTED; my_message(wfc->wakeup_error, ER(wfc->wakeup_error), MYF(0)); DBUG_RETURN(-1); @@ -7020,12 +7050,6 @@ MYSQL_BIN_LOG::queue_for_group_commit(group_commit_entry *orig_entry) else mysql_mutex_unlock(&wfc->LOCK_wait_commit); } - if (wfc && wfc->wakeup_error) - { - my_error(ER_PRIOR_COMMIT_FAILED, MYF(0)); - DBUG_RETURN(-1); - } - /* If the transaction we were waiting for has already put us into the group commit queue (and possibly already done the entire binlog commit for us), @@ -7034,6 +7058,12 @@ MYSQL_BIN_LOG::queue_for_group_commit(group_commit_entry *orig_entry) if (orig_entry->queued_by_other) DBUG_RETURN(0); + if (wfc && wfc->wakeup_error) + { + my_error(ER_PRIOR_COMMIT_FAILED, MYF(0)); + DBUG_RETURN(-1); + } + /* Now enqueue ourselves in the group commit queue. */ DEBUG_SYNC(orig_entry->thd, "commit_before_enqueue"); orig_entry->thd->clear_wakeup_ready(); @@ -9244,6 +9274,8 @@ binlog_background_thread(void *arg __attribute__((unused))) thd->thread_id= thread_id++; mysql_mutex_unlock(&LOCK_thread_count); thd->store_globals(); + thd->security_ctx->skip_grants(); + thd->set_command(COM_DAEMON); /* Load the slave replication GTID state from the mysql.gtid_slave_pos @@ -9547,7 +9579,7 @@ int TC_LOG_BINLOG::recover(LOG_INFO *linfo, const char *last_log_name, file= -1; } - if (0 == strcmp(linfo->log_file_name, last_log_name)) + if (!strcmp(linfo->log_file_name, last_log_name)) break; // No more files to do if ((file= open_binlog(&log, linfo->log_file_name, &errmsg)) < 0) { diff --git a/sql/log.h b/sql/log.h index bf11edfd429..d5aab4ac612 100644 --- a/sql/log.h +++ b/sql/log.h @@ -1094,6 +1094,13 @@ end: DBUG_RETURN(error); } - +static inline TC_LOG *get_tc_log_implementation() +{ + if (total_ha_2pc <= 1) + return &tc_log_dummy; + if (opt_bin_log) + 
return &mysql_bin_log; + return &tc_log_mmap; +} #endif /* LOG_H */ diff --git a/sql/log_event.cc b/sql/log_event.cc index 4edd41fdb2e..77353d33bf1 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -193,6 +193,28 @@ static const char *HA_ERR(int i) return "No Error!"; } + +/* + Return true if an error caught during event execution is a temporary error + that will cause automatic retry of the event group during parallel + replication, false otherwise. + + In parallel replication, conflicting transactions can occasionally cause + deadlocks; such errors are handled automatically by rolling back re-trying + the transactions, so should not pollute the error log. +*/ +static bool +is_parallel_retry_error(rpl_group_info *rgi, int err) +{ + if (!rgi->is_parallel_exec) + return false; + if (rgi->killed_for_retry && + (err == ER_QUERY_INTERRUPTED || err == ER_CONNECTION_KILLED)) + return true; + return has_temporary_error(rgi->thd); +} + + /** Error reporting facility for Rows_log_event::do_apply_event @@ -207,7 +229,7 @@ static const char *HA_ERR(int i) */ static void inline slave_rows_error_report(enum loglevel level, int ha_error, - Relay_log_info const *rli, THD *thd, + rpl_group_info *rgi, THD *thd, TABLE *table, const char * type, const char *log_name, ulong pos) { @@ -217,8 +239,19 @@ static void inline slave_rows_error_report(enum loglevel level, int ha_error, uint len; Diagnostics_area::Sql_condition_iterator it= thd->get_stmt_da()->sql_conditions(); + Relay_log_info const *rli= rgi->rli; const Sql_condition *err; buff[0]= 0; + int errcode= thd->is_error() ? thd->get_stmt_da()->sql_errno() : 0; + + /* + In parallel replication, deadlocks or other temporary errors can happen + occasionally in normal operation, they will be handled correctly and + automatically by re-trying the transactions. So do not pollute the error + log with messages about them. 
+ */ + if (is_parallel_retry_error(rgi, errcode)) + return; for (err= it++, slider= buff; err && slider < buff_end - 1; slider += len, err= it++) @@ -229,7 +262,7 @@ static void inline slave_rows_error_report(enum loglevel level, int ha_error, } if (ha_error != 0) - rli->report(level, thd->is_error() ? thd->get_stmt_da()->sql_errno() : 0, + rli->report(level, errcode, rgi->gtid_info(), "Could not execute %s event on table %s.%s;" "%s handler error %s; " "the event's master log %s, end_log_pos %lu", @@ -237,7 +270,7 @@ static void inline slave_rows_error_report(enum loglevel level, int ha_error, buff, handler_error == NULL ? "<unknown>" : handler_error, log_name, pos); else - rli->report(level, thd->is_error() ? thd->get_stmt_da()->sql_errno() : 0, + rli->report(level, errcode, rgi->gtid_info(), "Could not execute %s event on table %s.%s;" "%s the event's master log %s, end_log_pos %lu", type, table->s->db.str, table->s->table_name.str, @@ -4098,9 +4131,10 @@ int Query_log_event::do_apply_event(rpl_group_info *rgi, */ int error; char llbuff[22]; - if ((error= rows_event_stmt_cleanup(rgi, thd))) + if ((error= rows_event_stmt_cleanup(rgi, thd)) && + !is_parallel_retry_error(rgi, error)) { - const_cast<Relay_log_info*>(rli)->report(ERROR_LEVEL, error, + rli->report(ERROR_LEVEL, error, rgi->gtid_info(), "Error in cleaning up after an event preceding the commit; " "the group log file/position: %s %s", const_cast<Relay_log_info*>(rli)->group_master_log_name, @@ -4245,21 +4279,24 @@ int Query_log_event::do_apply_event(rpl_group_info *rgi, Record any GTID in the same transaction, so slave state is transactionally consistent. */ - if (current_stmt_is_commit && (sub_id= rgi->gtid_sub_id)) + if (current_stmt_is_commit && rgi->gtid_pending) { - /* Clear the GTID from the RLI so we don't accidentally reuse it. 
*/ - rgi->gtid_sub_id= 0; + sub_id= rgi->gtid_sub_id; + rgi->gtid_pending= false; gtid= rgi->current_gtid; thd->variables.option_bits&= ~OPTION_GTID_BEGIN; if (rpl_global_gtid_slave_state.record_gtid(thd, >id, sub_id, true, false)) { - rli->report(ERROR_LEVEL, ER_CANNOT_UPDATE_GTID_STATE, - "Error during COMMIT: failed to update GTID state in " - "%s.%s: %d: %s", - "mysql", rpl_gtid_slave_state_table_name.str, - thd->get_stmt_da()->sql_errno(), - thd->get_stmt_da()->message()); + int errcode= thd->get_stmt_da()->sql_errno(); + if (!is_parallel_retry_error(rgi, errcode)) + rli->report(ERROR_LEVEL, ER_CANNOT_UPDATE_GTID_STATE, + rgi->gtid_info(), + "Error during COMMIT: failed to update GTID state in " + "%s.%s: %d: %s", + "mysql", rpl_gtid_slave_state_table_name.str, + errcode, + thd->get_stmt_da()->message()); trans_rollback(thd); sub_id= 0; thd->is_slave_error= 1; @@ -4327,7 +4364,7 @@ int Query_log_event::do_apply_event(rpl_group_info *rgi, clear_all_errors(thd, const_cast<Relay_log_info*>(rli)); /* Can ignore query */ else { - rli->report(ERROR_LEVEL, expected_error, + rli->report(ERROR_LEVEL, expected_error, rgi->gtid_info(), "\ Query partially completed on the master (error on master: %d) \ and was aborted. There is a chance that your master is inconsistent at this \ @@ -4383,7 +4420,7 @@ compare_errors: !ignored_error_code(actual_error) && !ignored_error_code(expected_error)) { - rli->report(ERROR_LEVEL, 0, + rli->report(ERROR_LEVEL, 0, rgi->gtid_info(), "\ Query caused different errors on master and slave. \ Error on master: message (format)='%s' error code=%d ; \ @@ -4406,18 +4443,21 @@ Default database: '%s'. Query: '%s'", { DBUG_PRINT("info",("error ignored")); clear_all_errors(thd, const_cast<Relay_log_info*>(rli)); - thd->reset_killed(); + if (actual_error == ER_QUERY_INTERRUPTED || + actual_error == ER_CONNECTION_KILLED) + thd->reset_killed(); } /* Other cases: mostly we expected no error and get one. 
*/ else if (thd->is_slave_error || thd->is_fatal_error) { - rli->report(ERROR_LEVEL, actual_error, - "Error '%s' on query. Default database: '%s'. Query: '%s'", - (actual_error ? thd->get_stmt_da()->message() : - "unexpected success or fatal error"), - print_slave_db_safe(thd->db), query_arg); + if (!is_parallel_retry_error(rgi, actual_error)) + rli->report(ERROR_LEVEL, actual_error, rgi->gtid_info(), + "Error '%s' on query. Default database: '%s'. Query: '%s'", + (actual_error ? thd->get_stmt_da()->message() : + "unexpected success or fatal error"), + print_slave_db_safe(thd->db), query_arg); thd->is_slave_error= 1; } @@ -5084,7 +5124,7 @@ int Format_description_log_event::do_apply_event(rpl_group_info *rgi) if (!is_artificial_event() && created && thd->transaction.all.ha_list) { /* This is not an error (XA is safe), just an information */ - rli->report(INFORMATION_LEVEL, 0, + rli->report(INFORMATION_LEVEL, 0, NULL, "Rolling back unfinished transaction (no COMMIT " "or ROLLBACK in relay log). A probable cause is that " "the master died while writing the transaction to " @@ -6025,7 +6065,7 @@ error: sql_errno=ER_UNKNOWN_ERROR; err=ER(sql_errno); } - rli->report(ERROR_LEVEL, sql_errno,"\ + rli->report(ERROR_LEVEL, sql_errno, rgi->gtid_info(), "\ Error '%s' running LOAD DATA INFILE on table '%s'. Default database: '%s'", err, (char*)table_name, print_slave_db_safe(remember_db)); free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC)); @@ -6042,7 +6082,7 @@ Error '%s' running LOAD DATA INFILE on table '%s'. 
Default database: '%s'", (char*)table_name, print_slave_db_safe(remember_db)); - rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, rgi->gtid_info(), ER(ER_SLAVE_FATAL_ERROR), buf); DBUG_RETURN(1); } @@ -6532,12 +6572,10 @@ Gtid_log_event::do_apply_event(rpl_group_info *rgi) thd->variables.server_id= this->server_id; thd->variables.gtid_domain_id= this->domain_id; thd->variables.gtid_seq_no= this->seq_no; + mysql_reset_thd_for_next_command(thd); if (opt_gtid_strict_mode && opt_bin_log && opt_log_slave_updates) { - /* Need to reset prior "ok" status to give an error. */ - thd->clear_error(); - thd->get_stmt_da()->reset_diagnostics_area(); if (mysql_bin_log.check_strict_gtid_sequence(this->domain_id, this->server_id, this->seq_no)) return 1; @@ -7315,28 +7353,41 @@ int Xid_log_event::do_apply_event(rpl_group_info *rgi) bool res; int err; rpl_gtid gtid; - uint64 sub_id; + uint64 sub_id= 0; Relay_log_info const *rli= rgi->rli; /* + XID_EVENT works like a COMMIT statement. And it also updates the + mysql.gtid_slave_pos table with the GTID of the current transaction. + + Therefore, it acts much like a normal SQL statement, so we need to do + mysql_reset_thd_for_next_command() as if starting a new statement. + */ + mysql_reset_thd_for_next_command(thd); + /* Record any GTID in the same transaction, so slave state is transactionally consistent. */ - if ((sub_id= rgi->gtid_sub_id)) + if (rgi->gtid_pending) { - /* Clear the GTID from the RLI so we don't accidentally reuse it. 
*/ - rgi->gtid_sub_id= 0; + sub_id= rgi->gtid_sub_id; + rgi->gtid_pending= false; gtid= rgi->current_gtid; err= rpl_global_gtid_slave_state.record_gtid(thd, >id, sub_id, true, false); if (err) { - rli->report(ERROR_LEVEL, ER_CANNOT_UPDATE_GTID_STATE, - "Error during XID COMMIT: failed to update GTID state in " - "%s.%s: %d: %s", - "mysql", rpl_gtid_slave_state_table_name.str, - thd->get_stmt_da()->sql_errno(), - thd->get_stmt_da()->message()); + int ec= thd->get_stmt_da()->sql_errno(); + /* + Do not report an error if this is really a kill due to a deadlock. + In this case, the transaction will be re-tried instead. + */ + if (!is_parallel_retry_error(rgi, ec)) + rli->report(ERROR_LEVEL, ER_CANNOT_UPDATE_GTID_STATE, rgi->gtid_info(), + "Error during XID COMMIT: failed to update GTID state in " + "%s.%s: %d: %s", + "mysql", rpl_gtid_slave_state_table_name.str, ec, + thd->get_stmt_da()->message()); trans_rollback(thd); thd->is_slave_error= 1; return err; @@ -8366,7 +8417,7 @@ int Create_file_log_event::do_apply_event(rpl_group_info *rgi) init_io_cache(&file, fd, IO_SIZE, WRITE_CACHE, (my_off_t)0, 0, MYF(MY_WME|MY_NABP))) { - rli->report(ERROR_LEVEL, my_errno, + rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(), "Error in Create_file event: could not open file '%s'", fname_buf); goto err; @@ -8378,7 +8429,7 @@ int Create_file_log_event::do_apply_event(rpl_group_info *rgi) if (write_base(&file)) { strmov(ext, ".info"); // to have it right in the error message - rli->report(ERROR_LEVEL, my_errno, + rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(), "Error in Create_file event: could not write to file '%s'", fname_buf); goto err; @@ -8394,14 +8445,14 @@ int Create_file_log_event::do_apply_event(rpl_group_info *rgi) O_WRONLY | O_BINARY | O_EXCL | O_NOFOLLOW, MYF(MY_WME))) < 0) { - rli->report(ERROR_LEVEL, my_errno, + rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(), "Error in Create_file event: could not open file '%s'", fname_buf); goto err; } if 
(mysql_file_write(fd, (uchar*) block, block_len, MYF(MY_WME+MY_NABP))) { - rli->report(ERROR_LEVEL, my_errno, + rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(), "Error in Create_file event: write to '%s' failed", fname_buf); goto err; @@ -8558,7 +8609,7 @@ int Append_block_log_event::do_apply_event(rpl_group_info *rgi) O_WRONLY | O_BINARY | O_EXCL | O_NOFOLLOW, MYF(MY_WME))) < 0) { - rli->report(ERROR_LEVEL, my_errno, + rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(), "Error in %s event: could not create file '%s'", get_type_str(), fname); goto err; @@ -8569,7 +8620,7 @@ int Append_block_log_event::do_apply_event(rpl_group_info *rgi) O_WRONLY | O_APPEND | O_BINARY | O_NOFOLLOW, MYF(MY_WME))) < 0) { - rli->report(ERROR_LEVEL, my_errno, + rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(), "Error in %s event: could not open file '%s'", get_type_str(), fname); goto err; @@ -8582,7 +8633,7 @@ int Append_block_log_event::do_apply_event(rpl_group_info *rgi) if (mysql_file_write(fd, (uchar*) block, block_len, MYF(MY_WME+MY_NABP))) { - rli->report(ERROR_LEVEL, my_errno, + rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(), "Error in %s event: write to '%s' failed", get_type_str(), fname); goto err; @@ -8807,7 +8858,7 @@ int Execute_load_log_event::do_apply_event(rpl_group_info *rgi) init_io_cache(&file, fd, IO_SIZE, READ_CACHE, (my_off_t)0, 0, MYF(MY_WME|MY_NABP))) { - rli->report(ERROR_LEVEL, my_errno, + rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(), "Error in Exec_load event: could not open file '%s'", fname); goto err; @@ -8819,7 +8870,7 @@ int Execute_load_log_event::do_apply_event(rpl_group_info *rgi) opt_slave_sql_verify_checksum)) || lev->get_type_code() != NEW_LOAD_EVENT) { - rli->report(ERROR_LEVEL, 0, "Error in Exec_load event: " + rli->report(ERROR_LEVEL, 0, rgi->gtid_info(), "Error in Exec_load event: " "file '%s' appears corrupted", fname); goto err; } @@ -8845,7 +8896,7 @@ int Execute_load_log_event::do_apply_event(rpl_group_info *rgi) char 
*tmp= my_strdup(rli->last_error().message, MYF(MY_WME)); if (tmp) { - rli->report(ERROR_LEVEL, rli->last_error().number, + rli->report(ERROR_LEVEL, rli->last_error().number, rgi->gtid_info(), "%s. Failed executing load from '%s'", tmp, fname); my_free(tmp); } @@ -9017,9 +9068,9 @@ void Execute_load_query_log_event::print(FILE* file, if (local_fname) { my_b_write(&cache, (uchar*) query, fn_pos_start); - my_b_write_string(&cache, " LOCAL INFILE \'"); - my_b_printf(&cache, "%s", local_fname); - my_b_write_string(&cache, "\'"); + my_b_write_string(&cache, " LOCAL INFILE "); + pretty_print_str(&cache, local_fname, strlen(local_fname)); + if (dup_handling == LOAD_DUP_REPLACE) my_b_write_string(&cache, " REPLACE"); my_b_write_string(&cache, " INTO"); @@ -9078,7 +9129,7 @@ Execute_load_query_log_event::do_apply_event(rpl_group_info *rgi) /* Replace filename and LOCAL keyword in query before executing it */ if (buf == NULL) { - rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, rgi->gtid_info(), ER(ER_SLAVE_FATAL_ERROR), "Not enough memory"); return 1; } @@ -9686,6 +9737,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) if (open_and_lock_tables(thd, rgi->tables_to_lock, FALSE, 0)) { uint actual_error= thd->get_stmt_da()->sql_errno(); + #ifdef WITH_WSREP if (WSREP(thd)) { @@ -9696,9 +9748,11 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) thd->wsrep_exec_mode, thd->wsrep_conflict_state, (long long)wsrep_thd_trx_seqno(thd)); - } + } #endif - if (thd->is_slave_error || thd->is_fatal_error) + + if ((thd->is_slave_error || thd->is_fatal_error) && + !is_parallel_retry_error(rgi, actual_error)) { /* Error reporting borrowed from Query_log_event with many excessive @@ -9706,7 +9760,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) We should not honour --slave-skip-errors at this point as we are having severe errors which should not be skiped. 
*/ - rli->report(ERROR_LEVEL, actual_error, + rli->report(ERROR_LEVEL, actual_error, rgi->gtid_info(), "Error executing row event: '%s'", (actual_error ? thd->get_stmt_da()->message() : "unexpected success or fatal error")); @@ -9747,8 +9801,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) { DBUG_ASSERT(ptr->m_tabledef_valid); TABLE *conv_table; - if (!ptr->m_tabledef.compatible_with(thd, const_cast<Relay_log_info*>(rli), - ptr->table, &conv_table)) + if (!ptr->m_tabledef.compatible_with(thd, rgi, ptr->table, &conv_table)) { DBUG_PRINT("debug", ("Table: %s.%s is not compatible with master", ptr->table->s->db.str, @@ -9904,7 +9957,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) if (idempotent_error || ignored_error) { if (global_system_variables.log_warnings) - slave_rows_error_report(WARNING_LEVEL, error, rli, thd, table, + slave_rows_error_report(WARNING_LEVEL, error, rgi, thd, table, get_type_str(), RPL_LOG_NAME, (ulong) log_pos); clear_all_errors(thd, const_cast<Relay_log_info*>(rli)); @@ -9960,7 +10013,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) { if (global_system_variables.log_warnings) - slave_rows_error_report(WARNING_LEVEL, error, rli, thd, table, + slave_rows_error_report(WARNING_LEVEL, error, rgi, thd, table, get_type_str(), RPL_LOG_NAME, (ulong) log_pos); clear_all_errors(thd, const_cast<Relay_log_info*>(rli)); @@ -9971,7 +10024,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) if (error) { - slave_rows_error_report(ERROR_LEVEL, error, rli, thd, table, + slave_rows_error_report(ERROR_LEVEL, error, rgi, thd, table, get_type_str(), RPL_LOG_NAME, (ulong) log_pos); /* @@ -9993,7 +10046,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) if (get_flags(STMT_END_F) && (error= rows_event_stmt_cleanup(rgi, thd))) slave_rows_error_report(ERROR_LEVEL, thd->is_error() ? 
0 : error, - rli, thd, table, + rgi, thd, table, get_type_str(), RPL_LOG_NAME, (ulong) log_pos); DBUG_RETURN(error); @@ -10981,7 +11034,7 @@ int Table_map_log_event::do_apply_event(rpl_group_info *rgi) table_list->table_id); if (thd->slave_thread) - rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, rgi->gtid_info(), ER(ER_SLAVE_FATAL_ERROR), buf); else /* @@ -12605,6 +12658,8 @@ Incident_log_event::Incident_log_event(const char *buf, uint event_len, DBUG_PRINT("info",("event_len: %u; common_header_len: %d; post_header_len: %d", event_len, common_header_len, post_header_len)); + m_message.str= NULL; + m_message.length= 0; int incident_number= uint2korr(buf + common_header_len); if (incident_number >= INCIDENT_COUNT || incident_number <= INCIDENT_NONE) @@ -12621,7 +12676,13 @@ Incident_log_event::Incident_log_event(const char *buf, uint event_len, uint8 len= 0; // Assignment to keep compiler happy const char *str= NULL; // Assignment to keep compiler happy read_str(&ptr, str_end, &str, &len); - m_message.str= const_cast<char*>(str); + if (!(m_message.str= (char*) my_malloc(len+1, MYF(MY_WME)))) + { + /* Mark this event invalid */ + m_incident= INCIDENT_NONE; + DBUG_VOID_RETURN; + } + strmake(m_message.str, str, len); m_message.length= len; DBUG_PRINT("info", ("m_incident: %d", m_incident)); DBUG_VOID_RETURN; @@ -12630,6 +12691,8 @@ Incident_log_event::Incident_log_event(const char *buf, uint event_len, Incident_log_event::~Incident_log_event() { + if (m_message.str) + my_free(m_message.str); } @@ -12724,7 +12787,14 @@ Incident_log_event::do_apply_event(rpl_group_info *rgi) { Relay_log_info const *rli= rgi->rli; DBUG_ENTER("Incident_log_event::do_apply_event"); - rli->report(ERROR_LEVEL, ER_SLAVE_INCIDENT, + + if (ignored_error_code(ER_SLAVE_INCIDENT)) + { + DBUG_PRINT("info", ("Ignoring Incident")); + DBUG_RETURN(0); + } + + rli->report(ERROR_LEVEL, ER_SLAVE_INCIDENT, NULL, ER(ER_SLAVE_INCIDENT), description(), 
m_message.length > 0 ? m_message.str : "<none>"); diff --git a/sql/log_event.h b/sql/log_event.h index 2091d968558..c0370014c7d 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -4686,7 +4686,16 @@ public: { DBUG_ENTER("Incident_log_event::Incident_log_event"); DBUG_PRINT("enter", ("m_incident: %d", m_incident)); - m_message= msg; + m_message.str= NULL; + m_message.length= 0; + if (!(m_message.str= (char*) my_malloc(msg.length+1, MYF(MY_WME)))) + { + /* Mark this event invalid */ + m_incident= INCIDENT_NONE; + DBUG_VOID_RETURN; + } + strmake(m_message.str, msg.str, msg.length); + m_message.length= msg.length; set_direct_logging(); /* Replicate the incident irregardless of @@skip_replication. */ flags&= ~LOG_EVENT_SKIP_REPLICATION_F; diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc index 0cb78686243..4bff12ce039 100644 --- a/sql/log_event_old.cc +++ b/sql/log_event_old.cc @@ -108,7 +108,7 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, rpl_group_info *rgi) Error reporting borrowed from Query_log_event with many excessive simplifications (we don't honour --slave-skip-errors) */ - rli->report(ERROR_LEVEL, actual_error, + rli->report(ERROR_LEVEL, actual_error, NULL, "Error '%s' on opening tables", (actual_error ? 
ev_thd->get_stmt_da()->message() : "unexpected success or fatal error")); @@ -133,8 +133,7 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, rpl_group_info *rgi) { DBUG_ASSERT(ptr->m_tabledef_valid); TABLE *conv_table; - if (!ptr->m_tabledef.compatible_with(thd, const_cast<Relay_log_info*>(rli), - ptr->table, &conv_table)) + if (!ptr->m_tabledef.compatible_with(thd, rgi, ptr->table, &conv_table)) { ev_thd->is_slave_error= 1; rgi->slave_close_thread_tables(ev_thd); @@ -234,7 +233,7 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, rpl_group_info *rgi) break; default: - rli->report(ERROR_LEVEL, ev_thd->get_stmt_da()->sql_errno(), + rli->report(ERROR_LEVEL, ev_thd->get_stmt_da()->sql_errno(), NULL, "Error in %s event: row application failed. %s", ev->get_type_str(), ev_thd->is_error() ? ev_thd->get_stmt_da()->message() : ""); @@ -251,7 +250,7 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, rpl_group_info *rgi) if (error) { /* error has occured during the transaction */ - rli->report(ERROR_LEVEL, ev_thd->get_stmt_da()->sql_errno(), + rli->report(ERROR_LEVEL, ev_thd->get_stmt_da()->sql_errno(), NULL, "Error in %s event: error during transaction execution " "on table %s.%s. %s", ev->get_type_str(), table->s->db.str, @@ -1499,7 +1498,7 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi) simplifications (we don't honour --slave-skip-errors) */ uint actual_error= thd->net.last_errno; - rli->report(ERROR_LEVEL, actual_error, + rli->report(ERROR_LEVEL, actual_error, NULL, "Error '%s' in %s event: when locking tables", (actual_error ? 
thd->net.last_error : "unexpected success or fatal error"), @@ -1508,7 +1507,7 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi) } else { - rli->report(ERROR_LEVEL, error, + rli->report(ERROR_LEVEL, error, NULL, "Error in %s event: when locking tables", get_type_str()); } @@ -1530,8 +1529,7 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi) ptr= static_cast<RPL_TABLE_LIST*>(ptr->next_global), i++) { TABLE *conv_table; - if (ptr->m_tabledef.compatible_with(thd, const_cast<Relay_log_info*>(rli), - ptr->table, &conv_table)) + if (ptr->m_tabledef.compatible_with(thd, rgi, ptr->table, &conv_table)) { thd->is_slave_error= 1; rgi->slave_close_thread_tables(thd); @@ -1655,7 +1653,7 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi) break; default: - rli->report(ERROR_LEVEL, thd->net.last_errno, + rli->report(ERROR_LEVEL, thd->net.last_errno, NULL, "Error in %s event: row application failed. %s", get_type_str(), thd->net.last_error ? thd->net.last_error : ""); @@ -1693,7 +1691,7 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi) if (error) { /* error has occured during the transaction */ - rli->report(ERROR_LEVEL, thd->net.last_errno, + rli->report(ERROR_LEVEL, thd->net.last_errno, NULL, "Error in %s event: error during transaction execution " "on table %s.%s. %s", get_type_str(), table->s->db.str, @@ -1776,7 +1774,7 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi) */ DBUG_ASSERT(! thd->transaction_rollback_request); if ((error= (binlog_error ? 
trans_rollback_stmt(thd) : trans_commit_stmt(thd)))) - rli->report(ERROR_LEVEL, error, + rli->report(ERROR_LEVEL, error, NULL, "Error in %s event: commit of row events failed, " "table `%s`.`%s`", get_type_str(), m_table->s->db.str, diff --git a/sql/log_slow.h b/sql/log_slow.h index e8faf79a047..2ae07da97c3 100644 --- a/sql/log_slow.h +++ b/sql/log_slow.h @@ -31,6 +31,8 @@ #define QPLAN_QC_NO 1 << 6 #define QPLAN_TMP_DISK 1 << 7 #define QPLAN_TMP_TABLE 1 << 8 +#define QPLAN_FILESORT_PRIORITY_QUEUE 1 << 9 + /* ... */ #define QPLAN_MAX ((ulong) 1) << 31 /* reserved as placeholder */ diff --git a/sql/mf_iocache.cc b/sql/mf_iocache.cc index d8848c1ee35..3ed9261f630 100644 --- a/sql/mf_iocache.cc +++ b/sql/mf_iocache.cc @@ -57,7 +57,7 @@ int _my_b_net_read(register IO_CACHE *info, uchar *Buffer, if (!info->end_of_file) DBUG_RETURN(1); /* because my_b_get (no _) takes 1 byte at a time */ - read_length=my_net_read(net); + read_length= my_net_read_packet(net, 0); if (read_length == packet_error) { info->error= -1; diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 74c3efc714e..f3b618e6987 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -379,6 +379,7 @@ static I_List<THD> thread_cache; static bool binlog_format_used= false; LEX_STRING opt_init_connect, opt_init_slave; static mysql_cond_t COND_thread_cache, COND_flush_thread_cache; +mysql_cond_t COND_slave_init; static DYNAMIC_ARRAY all_options; /* Global variables */ @@ -534,6 +535,7 @@ ulong binlog_stmt_cache_use= 0, binlog_stmt_cache_disk_use= 0; ulong max_connections, max_connect_errors; ulong extra_max_connections; ulong slave_retried_transactions; +ulong feature_files_opened_with_delayed_keys; ulonglong denied_connections; my_decimal decimal_zero; @@ -724,7 +726,7 @@ mysql_mutex_t LOCK_crypt, LOCK_global_system_variables, LOCK_user_conn, LOCK_slave_list, LOCK_active_mi, - LOCK_connection_count, LOCK_error_messages; + LOCK_connection_count, LOCK_error_messages, LOCK_slave_init; mysql_mutex_t LOCK_stats, 
LOCK_global_user_client_stats, LOCK_global_table_stats, LOCK_global_index_stats; @@ -921,7 +923,8 @@ PSI_mutex_key key_LOCK_stats, key_LOCK_wakeup_ready, key_LOCK_wait_commit; PSI_mutex_key key_LOCK_gtid_waiting; -PSI_mutex_key key_LOCK_prepare_ordered, key_LOCK_commit_ordered; +PSI_mutex_key key_LOCK_prepare_ordered, key_LOCK_commit_ordered, + key_LOCK_slave_init; PSI_mutex_key key_TABLE_SHARE_LOCK_share; static PSI_mutex_info all_server_mutexes[]= @@ -984,6 +987,7 @@ static PSI_mutex_info all_server_mutexes[]= { &key_LOCK_error_messages, "LOCK_error_messages", PSI_FLAG_GLOBAL}, { &key_LOCK_prepare_ordered, "LOCK_prepare_ordered", PSI_FLAG_GLOBAL}, { &key_LOCK_commit_ordered, "LOCK_commit_ordered", PSI_FLAG_GLOBAL}, + { &key_LOCK_slave_init, "LOCK_slave_init", PSI_FLAG_GLOBAL}, { &key_LOG_INFO_lock, "LOG_INFO::lock", 0}, #ifdef WITH_WSREP { &key_LOCK_wsrep_ready, "LOCK_wsrep_ready", PSI_FLAG_GLOBAL}, @@ -1055,7 +1059,7 @@ PSI_cond_key key_TC_LOG_MMAP_COND_queue_busy; PSI_cond_key key_COND_rpl_thread_queue, key_COND_rpl_thread, key_COND_rpl_thread_pool, key_COND_parallel_entry, key_COND_group_commit_orderer, - key_COND_prepare_ordered; + key_COND_prepare_ordered, key_COND_slave_init; PSI_cond_key key_COND_wait_gtid, key_COND_gtid_ignore_duplicates; static PSI_cond_info all_server_conds[]= @@ -1113,6 +1117,7 @@ static PSI_cond_info all_server_conds[]= { &key_COND_parallel_entry, "COND_parallel_entry", 0}, { &key_COND_group_commit_orderer, "COND_group_commit_orderer", 0}, { &key_COND_prepare_ordered, "COND_prepare_ordered", 0}, + { &key_COND_slave_init, "COND_slave_init", 0}, { &key_COND_wait_gtid, "COND_wait_gtid", 0}, { &key_COND_gtid_ignore_duplicates, "COND_gtid_ignore_duplicates", 0} }; @@ -1178,65 +1183,60 @@ void net_before_header_psi(struct st_net *net, void *user_data, size_t /* unused thd= static_cast<THD*> (user_data); DBUG_ASSERT(thd != NULL); - if (thd->m_server_idle) - { - /* - The server is IDLE, waiting for the next command. 
- Technically, it is a wait on a socket, which may take a long time, - because the call is blocking. - Disable the socket instrumentation, to avoid recording a SOCKET event. - Instead, start explicitly an IDLE event. - */ - MYSQL_SOCKET_SET_STATE(net->vio->mysql_socket, PSI_SOCKET_STATE_IDLE); - MYSQL_START_IDLE_WAIT(thd->m_idle_psi, &thd->m_idle_state); - } + /* + We only come where when the server is IDLE, waiting for the next command. + Technically, it is a wait on a socket, which may take a long time, + because the call is blocking. + Disable the socket instrumentation, to avoid recording a SOCKET event. + Instead, start explicitly an IDLE event. + */ + MYSQL_SOCKET_SET_STATE(net->vio->mysql_socket, PSI_SOCKET_STATE_IDLE); + MYSQL_START_IDLE_WAIT(thd->m_idle_psi, &thd->m_idle_state); } -void net_after_header_psi(struct st_net *net, void *user_data, size_t /* unused: count */, my_bool rc) +void net_after_header_psi(struct st_net *net, void *user_data, + size_t /* unused: count */, my_bool rc) { THD *thd; thd= static_cast<THD*> (user_data); DBUG_ASSERT(thd != NULL); - if (thd->m_server_idle) - { - /* - The server just got data for a network packet header, - from the network layer. - The IDLE event is now complete, since we now have a message to process. - We need to: - - start a new STATEMENT event - - start a new STAGE event, within this statement, - - start recording SOCKET WAITS events, within this stage. - The proper order is critical to get events numbered correctly, - and nested in the proper parent. - */ - MYSQL_END_IDLE_WAIT(thd->m_idle_psi); - - if (! rc) - { - thd->m_statement_psi= MYSQL_START_STATEMENT(&thd->m_statement_state, - stmt_info_new_packet.m_key, - thd->db, thd->db_length, - thd->charset()); + /* + The server just got data for a network packet header, + from the network layer. + The IDLE event is now complete, since we now have a message to process. 
+ We need to: + - start a new STATEMENT event + - start a new STAGE event, within this statement, + - start recording SOCKET WAITS events, within this stage. + The proper order is critical to get events numbered correctly, + and nested in the proper parent. + */ + MYSQL_END_IDLE_WAIT(thd->m_idle_psi); - THD_STAGE_INFO(thd, stage_init); - } + if (! rc) + { + thd->m_statement_psi= MYSQL_START_STATEMENT(&thd->m_statement_state, + stmt_info_new_packet.m_key, + thd->db, thd->db_length, + thd->charset()); - /* - TODO: consider recording a SOCKET event for the bytes just read, - by also passing count here. - */ - MYSQL_SOCKET_SET_STATE(net->vio->mysql_socket, PSI_SOCKET_STATE_ACTIVE); + THD_STAGE_INFO(thd, stage_init); } + + /* + TODO: consider recording a SOCKET event for the bytes just read, + by also passing count here. + */ + MYSQL_SOCKET_SET_STATE(net->vio->mysql_socket, PSI_SOCKET_STATE_ACTIVE); } + void init_net_server_extension(THD *thd) { /* Start with a clean state for connection events. */ thd->m_idle_psi= NULL; thd->m_statement_psi= NULL; - thd->m_server_idle= false; /* Hook up the NET_SERVER callback in the net layer. 
*/ thd->m_net_server_extension.m_user_data= thd; thd->m_net_server_extension.m_before_header= net_before_header_psi; @@ -2312,6 +2312,8 @@ static void clean_up_mutexes() mysql_mutex_destroy(&LOCK_prepare_ordered); mysql_cond_destroy(&COND_prepare_ordered); mysql_mutex_destroy(&LOCK_commit_ordered); + mysql_mutex_destroy(&LOCK_slave_init); + mysql_cond_destroy(&COND_slave_init); DBUG_VOID_RETURN; } @@ -2503,6 +2505,7 @@ static MYSQL_SOCKET activate_tcp_port(uint port) int error; int arg; char port_buf[NI_MAXSERV]; + const char *real_bind_addr_str; MYSQL_SOCKET ip_sock= MYSQL_INVALID_SOCKET; DBUG_ENTER("activate_tcp_port"); DBUG_PRINT("general",("IP Socket is %d",port)); @@ -2511,16 +2514,36 @@ static MYSQL_SOCKET activate_tcp_port(uint port) hints.ai_flags= AI_PASSIVE; hints.ai_socktype= SOCK_STREAM; hints.ai_family= AF_UNSPEC; + + if (my_bind_addr_str && strcmp(my_bind_addr_str, "*") == 0) + real_bind_addr_str= NULL; // windows doesn't seem to support * here + else + real_bind_addr_str= my_bind_addr_str; my_snprintf(port_buf, NI_MAXSERV, "%d", port); - error= getaddrinfo(my_bind_addr_str, port_buf, &hints, &ai); + error= getaddrinfo(real_bind_addr_str, port_buf, &hints, &ai); if (error != 0) { DBUG_PRINT("error",("Got error: %d from getaddrinfo()", error)); - sql_perror(ER_DEFAULT(ER_IPSOCK_ERROR)); /* purecov: tested */ + + sql_print_error("%s: %s", ER_DEFAULT(ER_IPSOCK_ERROR), gai_strerror(error)); unireg_abort(1); /* purecov: tested */ } + /* + special case: for wildcard addresses prefer ipv6 over ipv4, + because we later switch off IPV6_V6ONLY, so ipv6 wildcard + addresses will work for ipv4 too + */ + if (!real_bind_addr_str && ai->ai_family == AF_INET && ai->ai_next + && ai->ai_next->ai_family == AF_INET6) + { + a= ai; + ai= ai->ai_next; + a->ai_next= ai->ai_next; + ai->ai_next= a; + } + for (a= ai; a != NULL; a= a->ai_next) { ip_sock= mysql_socket_socket(key_socket_tcpip, a->ai_family, @@ -4562,6 +4585,9 @@ static int init_thread_environment() 
mysql_cond_init(key_COND_prepare_ordered, &COND_prepare_ordered, NULL); mysql_mutex_init(key_LOCK_commit_ordered, &LOCK_commit_ordered, MY_MUTEX_INIT_SLOW); + mysql_mutex_init(key_LOCK_slave_init, &LOCK_slave_init, + MY_MUTEX_INIT_SLOW); + mysql_cond_init(key_COND_slave_init, &COND_slave_init, NULL); #ifdef HAVE_OPENSSL mysql_mutex_init(key_LOCK_des_key_file, @@ -5038,6 +5064,8 @@ a file name for --log-bin-index option", opt_binlog_index_name); if (ha_init_errors()) DBUG_RETURN(1); + tc_log= 0; // ha_initialize_handlerton() needs that + if (plugin_init(&remaining_argc, remaining_argv, (opt_noacl ? PLUGIN_INIT_SKIP_PLUGIN_TABLE : 0) | (opt_abort ? PLUGIN_INIT_SKIP_INITIALIZATION : 0))) @@ -5179,18 +5207,14 @@ a file name for --log-bin-index option", opt_binlog_index_name); wsrep_emulate_bin_log= 1; } #endif - tc_log= (total_ha_2pc > 1 ? (opt_bin_log ? - (TC_LOG *) &mysql_bin_log : -#ifdef WITH_WSREP - (WSREP_ON ? - (TC_LOG *) &tc_log_dummy : - (TC_LOG *) &tc_log_mmap)) : -#else - (TC_LOG *) &tc_log_mmap) : -#endif - (TC_LOG *) &tc_log_dummy); + + tc_log= get_tc_log_implementation(); + #ifdef WITH_WSREP - WSREP_DEBUG("Initial TC log open: %s", + if (WSREP_ON && tc_log == &tc_log_mmap) + tc_log= &tc_log_dummy; + + WSREP_DEBUG("Initial TC log open: %s", (tc_log == &mysql_bin_log) ? "binlog" : (tc_log == &tc_log_mmap) ? "mmap" : (tc_log == &tc_log_dummy) ? 
"dummy" : "unknown" @@ -7511,7 +7535,7 @@ struct my_option my_long_options[]= {"autocommit", 0, "Set default value for autocommit (0 or 1)", &opt_autocommit, &opt_autocommit, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, NULL}, - {"bind-address", OPT_BIND_ADDRESS, "IP address to bind to.", + {"bind-address", 0, "IP address to bind to.", &my_bind_addr_str, &my_bind_addr_str, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"binlog-do-db", OPT_BINLOG_DO_DB, @@ -8597,6 +8621,7 @@ SHOW_VAR status_vars[]= { {"Empty_queries", (char*) offsetof(STATUS_VAR, empty_queries), SHOW_LONG_STATUS}, {"Executed_events", (char*) &executed_events, SHOW_LONG_NOFLUSH }, {"Executed_triggers", (char*) offsetof(STATUS_VAR, executed_triggers), SHOW_LONG_STATUS}, + {"Feature_delay_key_write", (char*) &feature_files_opened_with_delayed_keys, SHOW_LONG }, {"Feature_dynamic_columns", (char*) offsetof(STATUS_VAR, feature_dynamic_columns), SHOW_LONG_STATUS}, {"Feature_fulltext", (char*) offsetof(STATUS_VAR, feature_fulltext), SHOW_LONG_STATUS}, {"Feature_gis", (char*) offsetof(STATUS_VAR, feature_gis), SHOW_LONG_STATUS}, @@ -8679,6 +8704,7 @@ SHOW_VAR status_vars[]= { {"Slow_launch_threads", (char*) &slow_launch_threads, SHOW_LONG}, {"Slow_queries", (char*) offsetof(STATUS_VAR, long_query_count), SHOW_LONG_STATUS}, {"Sort_merge_passes", (char*) offsetof(STATUS_VAR, filesort_merge_passes_), SHOW_LONG_STATUS}, + {"Sort_priority_queue_sorts",(char*) offsetof(STATUS_VAR, filesort_pq_sorts_), SHOW_LONG_STATUS}, {"Sort_range", (char*) offsetof(STATUS_VAR, filesort_range_count_), SHOW_LONG_STATUS}, {"Sort_rows", (char*) offsetof(STATUS_VAR, filesort_rows_), SHOW_LONG_STATUS}, {"Sort_scan", (char*) offsetof(STATUS_VAR, filesort_scan_count_), SHOW_LONG_STATUS}, @@ -9370,18 +9396,6 @@ mysqld_get_one_option(int optid, } break; #endif /* defined(ENABLED_DEBUG_SYNC) */ - case OPT_ENGINE_CONDITION_PUSHDOWN: - /* - The last of --engine-condition-pushdown and --optimizer_switch on - command line wins (see get_options(). 
- */ - if (global_system_variables.engine_condition_pushdown) - global_system_variables.optimizer_switch|= - OPTIMIZER_SWITCH_ENGINE_CONDITION_PUSHDOWN; - else - global_system_variables.optimizer_switch&= - ~OPTIMIZER_SWITCH_ENGINE_CONDITION_PUSHDOWN; - break; case OPT_LOG_ERROR: /* "No --log-error" == "write errors to stderr", @@ -9504,6 +9518,7 @@ mysql_getopt_value(const char *name, uint length, case OPT_KEY_CACHE_DIVISION_LIMIT: case OPT_KEY_CACHE_AGE_THRESHOLD: case OPT_KEY_CACHE_PARTITIONS: + case OPT_KEY_CACHE_CHANGED_BLOCKS_HASH_SIZE: { KEY_CACHE *key_cache; if (!(key_cache= get_or_create_key_cache(name, length))) @@ -9523,6 +9538,8 @@ mysql_getopt_value(const char *name, uint length, return &key_cache->param_age_threshold; case OPT_KEY_CACHE_PARTITIONS: return (uchar**) &key_cache->param_partitions; + case OPT_KEY_CACHE_CHANGED_BLOCKS_HASH_SIZE: + return (uchar**) &key_cache->changed_blocks_hash_size; } } case OPT_REPLICATE_DO_DB: diff --git a/sql/mysqld.h b/sql/mysqld.h index dd774e0d565..4e6d9026195 100644 --- a/sql/mysqld.h +++ b/sql/mysqld.h @@ -523,7 +523,8 @@ extern mysql_mutex_t LOCK_delayed_status, LOCK_delayed_create, LOCK_crypt, LOCK_timezone, LOCK_slave_list, LOCK_active_mi, LOCK_manager, LOCK_global_system_variables, LOCK_user_conn, - LOCK_prepared_stmt_count, LOCK_error_messages, LOCK_connection_count; + LOCK_prepared_stmt_count, LOCK_error_messages, LOCK_connection_count, + LOCK_slave_init; extern MYSQL_PLUGIN_IMPORT mysql_mutex_t LOCK_thread_count; #ifdef HAVE_OPENSSL extern mysql_mutex_t LOCK_des_key_file; @@ -534,6 +535,7 @@ extern mysql_rwlock_t LOCK_grant, LOCK_sys_init_connect, LOCK_sys_init_slave; extern mysql_rwlock_t LOCK_system_variables_hash; extern mysql_cond_t COND_thread_count; extern mysql_cond_t COND_manager; +extern mysql_cond_t COND_slave_init; extern int32 thread_running; extern int32 thread_count; extern my_atomic_rwlock_t thread_running_lock, thread_count_lock; @@ -553,7 +555,6 @@ extern MYSQL_PLUGIN_IMPORT 
pthread_key(THD*, THR_THD); enum options_mysqld { OPT_to_set_the_start_number=256, - OPT_BIND_ADDRESS, OPT_BINLOG_DO_DB, OPT_BINLOG_FORMAT, OPT_BINLOG_IGNORE_DB, @@ -561,9 +562,7 @@ enum options_mysqld OPT_BOOTSTRAP, OPT_CONSOLE, OPT_DEBUG_SYNC_TIMEOUT, - OPT_DELAY_KEY_WRITE_ALL, OPT_DEPRECATED_OPTION, - OPT_ENGINE_CONDITION_PUSHDOWN, OPT_IGNORE_DB_DIRECTORY, OPT_ISAM_LOG, OPT_KEY_BUFFER_SIZE, @@ -571,6 +570,7 @@ enum options_mysqld OPT_KEY_CACHE_BLOCK_SIZE, OPT_KEY_CACHE_DIVISION_LIMIT, OPT_KEY_CACHE_PARTITIONS, + OPT_KEY_CACHE_CHANGED_BLOCKS_HASH_SIZE, OPT_LOG_BASENAME, OPT_LOG_ERROR, OPT_LOWER_CASE_TABLE_NAMES, @@ -578,7 +578,6 @@ enum options_mysqld OPT_PLUGIN_LOAD, OPT_PLUGIN_LOAD_ADD, OPT_PFS_INSTRUMENT, - OPT_POOL_OF_THREADS, OPT_REPLICATE_DO_DB, OPT_REPLICATE_DO_TABLE, OPT_REPLICATE_IGNORE_DB, @@ -589,10 +588,7 @@ enum options_mysqld OPT_SAFE, OPT_SERVER_ID, OPT_SKIP_HOST_CACHE, - OPT_SKIP_LOCK, OPT_SKIP_RESOLVE, - OPT_SKIP_STACK_TRACE, - OPT_SKIP_SYMLINKS, OPT_SSL_CA, OPT_SSL_CAPATH, OPT_SSL_CERT, @@ -600,7 +596,6 @@ enum options_mysqld OPT_SSL_CRL, OPT_SSL_CRLPATH, OPT_SSL_KEY, - OPT_UPDATE_LOG, OPT_WANT_CORE, OPT_MYSQL_COMPATIBILITY, OPT_MYSQL_TO_BE_IMPLEMENTED, diff --git a/sql/net_serv.cc b/sql/net_serv.cc index 546542fa207..eb34fcc2d77 100644 --- a/sql/net_serv.cc +++ b/sql/net_serv.cc @@ -824,7 +824,8 @@ static my_bool my_net_skip_rest(NET *net, uint32 remain, thr_alarm_t *alarmed, */ static ulong -my_real_read(NET *net, size_t *complen) +my_real_read(NET *net, size_t *complen, + my_bool header __attribute__((unused))) { uchar *pos; size_t length; @@ -839,14 +840,16 @@ my_real_read(NET *net, size_t *complen) NET_HEADER_SIZE); #ifdef MYSQL_SERVER size_t count= remain; - struct st_net_server *server_extension; - server_extension= static_cast<st_net_server*> (net->extension); - if (server_extension != NULL) + struct st_net_server *server_extension= 0; + + if (header) { - void *user_data= server_extension->m_user_data; - 
DBUG_ASSERT(server_extension->m_before_header != NULL); - DBUG_ASSERT(server_extension->m_after_header != NULL); - server_extension->m_before_header(net, user_data, count); + server_extension= static_cast<st_net_server*> (net->extension); + if (server_extension != NULL) + { + void *user_data= server_extension->m_user_data; + server_extension->m_before_header(net, user_data, count); + } } #endif @@ -1042,6 +1045,16 @@ end: } +/* Old interface. See my_net_read_packet() for function description */ + +#undef my_net_read + +ulong my_net_read(NET *net) +{ + return my_net_read_packet(net, 0); +} + + /** Read a packet from the client/server and return it without the internal package header. @@ -1053,13 +1066,17 @@ end: If the packet was compressed, its uncompressed and the length of the uncompressed packet is returned. + read_from_server is set when the server is reading a new command + from the client. + @return The function returns the length of the found packet or packet_error. net->read_pos points to the read data. */ + ulong -my_net_read(NET *net) +my_net_read_packet(NET *net, my_bool read_from_server) { size_t len, complen; @@ -1069,7 +1086,7 @@ my_net_read(NET *net) if (!net->compress) { #endif - len = my_real_read(net,&complen); + len = my_real_read(net,&complen, read_from_server); if (len == MAX_PACKET_LENGTH) { /* First packet of a multi-packet. 
Concatenate the packets */ @@ -1079,7 +1096,7 @@ my_net_read(NET *net) { net->where_b += len; total_length += len; - len = my_real_read(net,&complen); + len = my_real_read(net,&complen, 0); } while (len == MAX_PACKET_LENGTH); if (len != packet_error) len+= total_length; @@ -1171,11 +1188,13 @@ my_net_read(NET *net) } net->where_b=buf_length; - if ((packet_len = my_real_read(net,&complen)) == packet_error) + if ((packet_len = my_real_read(net,&complen, read_from_server)) + == packet_error) { MYSQL_NET_READ_DONE(1, 0); return packet_error; } + read_from_server= 0; if (my_uncompress(net->buff + net->where_b, packet_len, &complen)) { diff --git a/sql/opt_range.cc b/sql/opt_range.cc index fc2aa75e604..b38b048c344 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -1,5 +1,5 @@ -/* Copyright (c) 2000, 2013, Oracle and/or its affiliates. - Copyright (c) 2008, 2013, Monty Program Ab. +/* Copyright (c) 2000, 2014, Oracle and/or its affiliates. + Copyright (c) 2008, 2014, Monty Program Ab. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -359,31 +359,54 @@ public: elements(1),use_count(1),left(0),right(0), next_key_part(0), color(BLACK), type(type_arg) {} - inline bool is_same(SEL_ARG *arg) + /** + returns true if a range predicate is equal. Use all_same() + to check for equality of all the predicates on this keypart. 
+ */ + inline bool is_same(const SEL_ARG *arg) const { if (type != arg->type || part != arg->part) - return 0; + return false; if (type != KEY_RANGE) - return 1; + return true; return cmp_min_to_min(arg) == 0 && cmp_max_to_max(arg) == 0; } + /** + returns true if all the predicates in the keypart tree are equal + */ + bool all_same(const SEL_ARG *arg) const + { + if (type != arg->type || part != arg->part) + return false; + if (type != KEY_RANGE) + return true; + if (arg == this) + return true; + const SEL_ARG *cmp_arg= arg->first(); + const SEL_ARG *cur_arg= first(); + for (; cur_arg && cmp_arg && cur_arg->is_same(cmp_arg); + cur_arg= cur_arg->next, cmp_arg= cmp_arg->next) ; + if (cur_arg || cmp_arg) + return false; + return true; + } inline void merge_flags(SEL_ARG *arg) { maybe_flag|=arg->maybe_flag; } inline void maybe_smaller() { maybe_flag=1; } /* Return true iff it's a single-point null interval */ inline bool is_null_interval() { return maybe_null && max_value[0] == 1; } - inline int cmp_min_to_min(SEL_ARG* arg) + inline int cmp_min_to_min(const SEL_ARG* arg) const { return sel_cmp(field,min_value, arg->min_value, min_flag, arg->min_flag); } - inline int cmp_min_to_max(SEL_ARG* arg) + inline int cmp_min_to_max(const SEL_ARG* arg) const { return sel_cmp(field,min_value, arg->max_value, min_flag, arg->max_flag); } - inline int cmp_max_to_max(SEL_ARG* arg) + inline int cmp_max_to_max(const SEL_ARG* arg) const { return sel_cmp(field,max_value, arg->max_value, max_flag, arg->max_flag); } - inline int cmp_max_to_min(SEL_ARG* arg) + inline int cmp_max_to_min(const SEL_ARG* arg) const { return sel_cmp(field,max_value, arg->min_value, max_flag, arg->min_flag); } @@ -563,6 +586,7 @@ public: void test_use_count(SEL_ARG *root); #endif SEL_ARG *first(); + const SEL_ARG *first() const; SEL_ARG *last(); void make_root(); inline bool simple_key() @@ -652,6 +676,18 @@ public: SEL_ARG *clone_tree(RANGE_OPT_PARAM *param); }; +/** + Helper function to compare two SEL_ARG's. 
+*/ +static bool all_same(const SEL_ARG *sa1, const SEL_ARG *sa2) +{ + if (sa1 == NULL && sa2 == NULL) + return true; + if ((sa1 != NULL && sa2 == NULL) || (sa1 == NULL && sa2 != NULL)) + return false; + return sa1->all_same(sa2); +} + class SEL_IMERGE; #define CLONE_KEY1_MAYBE 1 @@ -2495,6 +2531,13 @@ SEL_ARG *SEL_ARG::clone(RANGE_OPT_PARAM *param, SEL_ARG *new_parent, return tmp; } +/** + This gives the first SEL_ARG in the interval list, and the minimal element + in the red-black tree + + @return + SEL_ARG first SEL_ARG in the interval list +*/ SEL_ARG *SEL_ARG::first() { SEL_ARG *next_arg=this; @@ -2505,6 +2548,11 @@ SEL_ARG *SEL_ARG::first() return next_arg; } +const SEL_ARG *SEL_ARG::first() const +{ + return const_cast<SEL_ARG*>(this)->first(); +} + SEL_ARG *SEL_ARG::last() { SEL_ARG *next_arg=this; @@ -11043,6 +11091,7 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, uint part; bool create_err= FALSE; Cost_estimate cost; + uint max_used_key_len; old_root= thd->mem_root; /* The following call may change thd->mem_root */ @@ -11069,12 +11118,13 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, range->min_length= range->max_length= ref->key_length; range->min_keypart_map= range->max_keypart_map= make_prev_keypart_map(ref->key_parts); - range->flag= (ref->key_length == key_info->key_length ? 
EQ_RANGE : 0); + range->flag= EQ_RANGE; if (!(quick->key_parts=key_part=(KEY_PART *) alloc_root(&quick->alloc,sizeof(KEY_PART)*ref->key_parts))) goto err; - + + max_used_key_len=0; for (part=0 ; part < ref->key_parts ;part++,key_part++) { key_part->part=part; @@ -11083,7 +11133,12 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, key_part->store_length= key_info->key_part[part].store_length; key_part->null_bit= key_info->key_part[part].null_bit; key_part->flag= (uint8) key_info->key_part[part].key_part_flag; + + max_used_key_len +=key_info->key_part[part].store_length; } + + quick->max_used_key_length= max_used_key_len; + if (insert_dynamic(&quick->ranges,(uchar*)&range)) goto err; @@ -12334,6 +12389,66 @@ void QUICK_ROR_UNION_SELECT::add_keys_and_lengths(String *key_names, } +void QUICK_RANGE_SELECT::add_used_key_part_to_set(MY_BITMAP *col_set) +{ + uint key_len; + KEY_PART *part= key_parts; + for (key_len=0; key_len < max_used_key_length; + key_len += (part++)->store_length) + { + bitmap_set_bit(col_set, part->field->field_index); + } +} + + +void QUICK_GROUP_MIN_MAX_SELECT::add_used_key_part_to_set(MY_BITMAP *col_set) +{ + uint key_len; + KEY_PART_INFO *part= index_info->key_part; + for (key_len=0; key_len < max_used_key_length; + key_len += (part++)->store_length) + { + bitmap_set_bit(col_set, part->field->field_index); + } +} + + +void QUICK_ROR_INTERSECT_SELECT::add_used_key_part_to_set(MY_BITMAP *col_set) +{ + List_iterator_fast<QUICK_SELECT_WITH_RECORD> it(quick_selects); + QUICK_SELECT_WITH_RECORD *quick; + while ((quick= it++)) + { + quick->quick->add_used_key_part_to_set(col_set); + } +} + + +void QUICK_INDEX_SORT_SELECT::add_used_key_part_to_set(MY_BITMAP *col_set) +{ + QUICK_RANGE_SELECT *quick; + List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); + while ((quick= it++)) + { + quick->add_used_key_part_to_set(col_set); + } + if (pk_quick_select) + pk_quick_select->add_used_key_part_to_set(col_set); +} + + +void 
QUICK_ROR_UNION_SELECT::add_used_key_part_to_set(MY_BITMAP *col_set) +{ + QUICK_SELECT_I *quick; + List_iterator_fast<QUICK_SELECT_I> it(quick_selects); + + while ((quick= it++)) + { + quick->add_used_key_part_to_set(col_set); + } +} + + /******************************************************************************* * Implementation of QUICK_GROUP_MIN_MAX_SELECT *******************************************************************************/ @@ -12341,6 +12456,8 @@ void QUICK_ROR_UNION_SELECT::add_keys_and_lengths(String *key_names, static inline uint get_field_keypart(KEY *index, Field *field); static inline SEL_ARG * get_index_range_tree(uint index, SEL_TREE* range_tree, PARAM *param, uint *param_idx); +static bool get_sel_arg_for_keypart(Field *field, SEL_ARG *index_range_tree, + SEL_ARG **cur_range); static bool get_constant_key_infix(KEY *index_info, SEL_ARG *index_range_tree, KEY_PART_INFO *first_non_group_part, KEY_PART_INFO *min_max_arg_part, @@ -12406,6 +12523,16 @@ cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts, never stored after a unique key lookup in the clustered index and furhter index_next/prev calls can not be used. So loose index scan optimization can not be used in this case. + SA7. If Q has both AGG_FUNC(DISTINCT ...) and MIN/MAX() functions then this + access method is not used. + For above queries MIN/MAX() aggregation has to be done at + nested_loops_join (end_send_group). But with current design MIN/MAX() + is always set as part of loose index scan. Because of this mismatch + MIN() and MAX() values will be set incorrectly. For such queries to + work we need a new interface for loose index scan. This new interface + should only fetch records with min and max values and let + end_send_group to do aggregation. Until then do not use + loose_index_scan. GA1. If Q has a GROUP BY clause, then GA is a prefix of I. That is, if G_i = A_j => i = j. GA2. 
If Q has a DISTINCT clause, then there is a permutation of SA that @@ -12437,6 +12564,8 @@ cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts, above tests. By transitivity then it also follows that each WA_i participates in the index I (if this was already tested for GA, NGA and C). + WA2. If there is a predicate on C, then it must be in conjunction + to all predicates on all earlier keyparts in I. C) Overall query form: SELECT EXPR([A_1,...,A_k], [B_1,...,B_m], [MIN(C)], [MAX(C)]) @@ -12571,6 +12700,13 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time) DBUG_RETURN(NULL); } } + + /* Check (SA7). */ + if (is_agg_distinct && (have_max || have_min)) + { + DBUG_RETURN(NULL); + } + /* Check (SA5). */ if (join->select_distinct) { @@ -12860,6 +12996,25 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time) } } + /** + Test WA2:If there are conditions on a column C participating in + MIN/MAX, those conditions must be conjunctions to all earlier + keyparts. Otherwise, Loose Index Scan cannot be used. + */ + if (tree && min_max_arg_item) + { + uint dummy; + SEL_ARG *index_range_tree= get_index_range_tree(cur_index, tree, param, + &dummy); + SEL_ARG *cur_range= NULL; + if (get_sel_arg_for_keypart(min_max_arg_part->field, + index_range_tree, &cur_range) || + (cur_range && cur_range->type != SEL_ARG::KEY_RANGE)) + { + goto next_index; + } + } + /* If we got to this point, cur_index_info passes the test. */ key_infix_parts= cur_key_infix_len ? (uint) (first_non_infix_part - first_non_group_part) : 0; @@ -13177,73 +13332,75 @@ check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item, /* - Get SEL_ARG tree, if any, for the keypart covering non grouping - attribute (NGA) field 'nga_field'. + Get the SEL_ARG tree 'tree' for the keypart covering 'field', if + any. 'tree' must be a unique conjunction to ALL predicates in earlier + keyparts of 'keypart_tree'. 
+ + E.g., if 'keypart_tree' is for a composite index (kp1,kp2) and kp2 + covers 'field', all these conditions satisfies the requirement: - This function enforces the NGA3 test: If 'keypart_tree' contains a - condition for 'nga_field', there can only be one range. In the - opposite case, this function returns with error and 'cur_range' - should not be used. + 1. "(kp1=2 OR kp1=3) AND kp2=10" => returns "kp2=10" + 2. "(kp1=2 AND kp2=10) OR (kp1=3 AND kp2=10)" => returns "kp2=10" + 3. "(kp1=2 AND (kp2=10 OR kp2=11)) OR (kp1=3 AND (kp2=10 OR kp2=11))" + => returns "kp2=10 OR kp2=11" - Note that the NGA1 and NGA2 requirements, like whether or not the - range predicate for 'nga_field' is equality, is not tested by this - function. + whereas these do not + 1. "(kp1=2 AND kp2=10) OR kp1=3" + 2. "(kp1=2 AND kp2=10) OR (kp1=3 AND kp2=11)" + 3. "(kp1=2 AND kp2=10) OR (kp1=3 AND (kp2=10 OR kp2=11))" - @param[in] nga_field The NGA field we want the SEL_ARG tree for + This function effectively tests requirement WA2. In combination with + a test that the returned tree has no more than one range it is also + a test of NGA3. + + @param[in] field The field we want the SEL_ARG tree for @param[in] keypart_tree Root node of the SEL_ARG* tree for the index @param[out] cur_range The SEL_ARG tree, if any, for the keypart covering field 'keypart_field' - @retval true 'keypart_tree' contained a predicate for 'nga_field' but - multiple ranges exists. 'cur_range' should not be used. 
+ @retval true 'keypart_tree' contained a predicate for 'field' that + is not conjunction to all predicates on earlier keyparts @retval false otherwise */ static bool -get_sel_arg_for_keypart(Field *nga_field, +get_sel_arg_for_keypart(Field *field, SEL_ARG *keypart_tree, SEL_ARG **cur_range) { - if(keypart_tree == NULL) + if (keypart_tree == NULL) return false; - if(keypart_tree->field->eq(nga_field)) + if (keypart_tree->field->eq(field)) { - /* - Enforce NGA3: If a condition for nga_field has been found, only - a single range is allowed. - */ - if (keypart_tree->prev || keypart_tree->next) - return true; // There are multiple ranges - *cur_range= keypart_tree; return false; } - SEL_ARG *found_tree= NULL; + SEL_ARG *tree_first_range= NULL; SEL_ARG *first_kp= keypart_tree->first(); - for (SEL_ARG *cur_kp= first_kp; cur_kp && !found_tree; - cur_kp= cur_kp->next) + for (SEL_ARG *cur_kp= first_kp; cur_kp; cur_kp= cur_kp->next) { + SEL_ARG *curr_tree= NULL; if (cur_kp->next_key_part) { - if (get_sel_arg_for_keypart(nga_field, + if (get_sel_arg_for_keypart(field, cur_kp->next_key_part, - &found_tree)) + &curr_tree)) return true; - } /* - Enforce NGA3: If a condition for nga_field has been found,only - a single range is allowed. - */ - if (found_tree && first_kp->next) - return true; // There are multiple ranges + Check if the SEL_ARG tree for 'field' is identical for all ranges in + 'keypart_tree + */ + if (cur_kp == first_kp) + tree_first_range= curr_tree; + else if (!all_same(tree_first_range, curr_tree)) + return true; } - *cur_range= found_tree; + *cur_range= tree_first_range; return false; } - /* Extract a sequence of constants from a conjunction of equality predicates. @@ -13266,7 +13423,8 @@ get_sel_arg_for_keypart(Field *nga_field, (const_ci = NG_i).. In addition, there can only be one range when there is such a gap. Thus all the NGF_i attributes must fill the 'gap' between the last group-by - attribute and the MIN/MAX attribute in the index (if present). 
If these + attribute and the MIN/MAX attribute in the index (if present). Also ensure + that there is only a single range on NGF_i (NGA3). If these conditions hold, copy each constant from its corresponding predicate into key_infix, in the order its NG_i attribute appears in the index, and update key_infix_len with the total length of the key parts in key_infix. @@ -13275,7 +13433,6 @@ get_sel_arg_for_keypart(Field *nga_field, TRUE if the index passes the test FALSE o/w */ - static bool get_constant_key_infix(KEY *index_info, SEL_ARG *index_range_tree, KEY_PART_INFO *first_non_group_part, @@ -13295,32 +13452,42 @@ get_constant_key_infix(KEY *index_info, SEL_ARG *index_range_tree, { cur_range= NULL; /* - Find the range tree for the current keypart. We assume that - index_range_tree points to the first keypart in the index. + Check NGA3: + 1. get_sel_arg_for_keypart gets the range tree for the 'field' and also + checks for a unique conjunction of this tree with all the predicates + on the earlier keyparts in the index. + 2. Check for multiple ranges on the found keypart tree. + + We assume that index_range_tree points to the leftmost keypart in + the index. */ - if(get_sel_arg_for_keypart(cur_part->field, index_range_tree, &cur_range)) + if (get_sel_arg_for_keypart(cur_part->field, index_range_tree, + &cur_range)) + return false; + + if (cur_range && cur_range->elements > 1) return false; if (!cur_range || cur_range->type != SEL_ARG::KEY_RANGE) { if (min_max_arg_part) - return FALSE; /* The current keypart has no range predicates at all. */ + return false; /* The current keypart has no range predicates at all. 
*/ else { *first_non_infix_part= cur_part; - return TRUE; + return true; } } if ((cur_range->min_flag & NO_MIN_RANGE) || (cur_range->max_flag & NO_MAX_RANGE) || (cur_range->min_flag & NEAR_MIN) || (cur_range->max_flag & NEAR_MAX)) - return FALSE; + return false; uint field_length= cur_part->store_length; if (cur_range->maybe_null && cur_range->min_value[0] && cur_range->max_value[0]) - { + { /* cur_range specifies 'IS NULL'. In this case the argument points to a "null value" (is_null_string) that may not always be long @@ -13339,7 +13506,7 @@ get_constant_key_infix(KEY *index_info, SEL_ARG *index_range_tree, *key_infix_len+= field_length; } else - return FALSE; + return false; } if (!min_max_arg_part && (cur_part == last_part)) diff --git a/sql/opt_range.h b/sql/opt_range.h index 1ca245ea420..54b15826d1b 100644 --- a/sql/opt_range.h +++ b/sql/opt_range.h @@ -389,6 +389,13 @@ public: Returns a QUICK_SELECT with reverse order of to the index. */ virtual QUICK_SELECT_I *make_reverse(uint used_key_parts_arg) { return NULL; } + + /* + Add the key columns used by the quick select into table's read set. + + This is used by an optimization in filesort. 
+ */ + virtual void add_used_key_part_to_set(MY_BITMAP *col_set)=0; }; @@ -479,6 +486,9 @@ public: #endif virtual void replace_handler(handler *new_file) { file= new_file; } QUICK_SELECT_I *make_reverse(uint used_key_parts_arg); + + virtual void add_used_key_part_to_set(MY_BITMAP *col_set); + private: /* Default copy ctor used by QUICK_SELECT_DESC */ friend class TRP_ROR_INTERSECT; @@ -640,6 +650,8 @@ public: virtual int read_keys_and_merge()= 0; /* used to get rows collected in Unique */ READ_RECORD read_record; + + virtual void add_used_key_part_to_set(MY_BITMAP *col_set); }; @@ -714,6 +726,7 @@ public: void add_keys_and_lengths(String *key_names, String *used_lengths); Explain_quick_select *get_explain(MEM_ROOT *alloc); bool is_keys_used(const MY_BITMAP *fields); + void add_used_key_part_to_set(MY_BITMAP *col_set); #ifndef DBUG_OFF void dbug_dump(int indent, bool verbose); #endif @@ -793,6 +806,7 @@ public: void add_keys_and_lengths(String *key_names, String *used_lengths); Explain_quick_select *get_explain(MEM_ROOT *alloc); bool is_keys_used(const MY_BITMAP *fields); + void add_used_key_part_to_set(MY_BITMAP *col_set); #ifndef DBUG_OFF void dbug_dump(int indent, bool verbose); #endif @@ -935,6 +949,7 @@ public: bool unique_key_range() { return false; } int get_type() { return QS_TYPE_GROUP_MIN_MAX; } void add_keys_and_lengths(String *key_names, String *used_lengths); + void add_used_key_part_to_set(MY_BITMAP *col_set); #ifndef DBUG_OFF void dbug_dump(int indent, bool verbose); #endif diff --git a/sql/records.cc b/sql/records.cc index 1b230c41156..cce6272a4e3 100644 --- a/sql/records.cc +++ b/sql/records.cc @@ -287,9 +287,7 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table, thd->variables.read_buff_size); } /* Condition pushdown to storage engine */ - if ((thd->variables.optimizer_switch & - OPTIMIZER_SWITCH_ENGINE_CONDITION_PUSHDOWN) && - select && select->cond && + if (thd->use_cond_push(table->file) && select && select->cond && 
(select->cond->used_tables() & table->map) && !table->file->pushed_cond) table->file->cond_push(select->cond); diff --git a/sql/replication.h b/sql/replication.h index 510e56a3085..fc48ecd9ffc 100644 --- a/sql/replication.h +++ b/sql/replication.h @@ -16,6 +16,20 @@ #ifndef REPLICATION_H #define REPLICATION_H +/*************************************************************************** + NOTE: plugin locking. + This API was created specifically for the semisync plugin and its locking + logic is also matches semisync plugin usage pattern. In particular, a plugin + is locked on Binlog_transmit_observer::transmit_start and is unlocked after + Binlog_transmit_observer::transmit_stop. All other master observable events + happen between these two and don't lock the plugin at all. This works well + for the semisync_master plugin. + + Also a plugin is locked on Binlog_relay_IO_observer::thread_start + and unlocked after Binlog_relay_IO_observer::thread_stop. This works well for + the semisync_slave plugin. +***************************************************************************/ + #include <mysql.h> typedef struct st_mysql MYSQL; diff --git a/sql/rpl_gtid.cc b/sql/rpl_gtid.cc index 105bdad6f97..c8d5e2a2db0 100644 --- a/sql/rpl_gtid.cc +++ b/sql/rpl_gtid.cc @@ -65,16 +65,16 @@ rpl_slave_state::update_state_hash(uint64 sub_id, rpl_gtid *gtid, int rpl_slave_state::record_and_update_gtid(THD *thd, rpl_group_info *rgi) { - uint64 sub_id; DBUG_ENTER("rpl_slave_state::record_and_update_gtid"); /* Update the GTID position, if we have it and did not already update it in a GTID transaction. 
*/ - if ((sub_id= rgi->gtid_sub_id)) + if (rgi->gtid_pending) { - rgi->gtid_sub_id= 0; + uint64 sub_id= rgi->gtid_sub_id; + rgi->gtid_pending= false; if (rgi->gtid_ignore_duplicate_state!=rpl_group_info::GTID_DUPLICATE_IGNORE) { if (record_gtid(thd, &rgi->current_gtid, sub_id, false, false)) @@ -120,7 +120,7 @@ rpl_slave_state::check_duplicate_gtid(rpl_gtid *gtid, rpl_group_info *rgi) uint32 seq_no= gtid->seq_no; rpl_slave_state::element *elem; int res; - bool did_enter_cond; + bool did_enter_cond= false; PSI_stage_info old_stage; THD *thd; Relay_log_info *rli= rgi->rli; @@ -138,7 +138,6 @@ rpl_slave_state::check_duplicate_gtid(rpl_gtid *gtid, rpl_group_info *rgi) each lock release and re-take. */ - did_enter_cond= false; for (;;) { if (elem->highest_seq_no >= seq_no) @@ -667,7 +666,7 @@ end: if (table_opened) { - if (err) + if (err || (err= ha_commit_trans(thd, FALSE))) { /* If error, we need to put any remaining elist back into the HASH so we @@ -681,13 +680,8 @@ end: } ha_rollback_trans(thd, FALSE); - close_thread_tables(thd); - } - else - { - ha_commit_trans(thd, FALSE); - close_thread_tables(thd); } + close_thread_tables(thd); if (in_transaction) thd->mdl_context.release_statement_locks(); else diff --git a/sql/rpl_handler.cc b/sql/rpl_handler.cc index 2777dabf451..34d3df23435 100644 --- a/sql/rpl_handler.cc +++ b/sql/rpl_handler.cc @@ -170,40 +170,16 @@ void delegates_destroy() /* This macro is used by almost all the Delegate methods to iterate over all the observers running given callback function of the - delegate . - - Add observer plugins to the thd->lex list, after each statement, all - plugins add to thd->lex will be automatically unlocked. + delegate. */ -#define FOREACH_OBSERVER(r, f, thd, args) \ +#define FOREACH_OBSERVER(r, f, do_lock, args) \ param.server_id= thd->variables.server_id; \ - /* - Use a struct to make sure that they are allocated adjacent, check - delete_dynamic(). 
- */ \ - struct { \ - DYNAMIC_ARRAY plugins; \ - /* preallocate 8 slots */ \ - plugin_ref plugins_buffer[8]; \ - } s; \ - DYNAMIC_ARRAY *plugins= &s.plugins; \ - plugin_ref *plugins_buffer= s.plugins_buffer; \ - my_init_dynamic_array2(plugins, sizeof(plugin_ref), \ - plugins_buffer, 8, 8, MYF(0)); \ read_lock(); \ Observer_info_iterator iter= observer_info_iter(); \ Observer_info *info= iter++; \ for (; info; info= iter++) \ { \ - plugin_ref plugin= \ - my_plugin_lock(0, info->plugin); \ - if (!plugin) \ - { \ - /* plugin is not intialized or deleted, this is not an error */ \ - r= 0; \ - break; \ - } \ - insert_dynamic(plugins, (uchar *)&plugin); \ + if (do_lock) plugin_lock(thd, plugin_int_to_ref(info->plugin_int)); \ if (((Observer *)info->observer)->f \ && ((Observer *)info->observer)->f args) \ { \ @@ -213,17 +189,7 @@ void delegates_destroy() break; \ } \ } \ - unlock(); \ - /* - Unlock plugins should be done after we released the Delegate lock - to avoid possible deadlock when this is the last user of the - plugin, and when we unlock the plugin, it will try to - deinitialize the plugin, which will try to lock the Delegate in - order to remove the observers. - */ \ - plugin_unlock_list(0, (plugin_ref*)plugins->buffer, \ - plugins->elements); \ - delete_dynamic(plugins) + unlock(); int Trans_delegate::after_commit(THD *thd, bool all) @@ -240,7 +206,7 @@ int Trans_delegate::after_commit(THD *thd, bool all) param.log_pos= log_info ? log_info->log_pos : 0; int ret= 0; - FOREACH_OBSERVER(ret, after_commit, thd, (¶m)); + FOREACH_OBSERVER(ret, after_commit, false, (¶m)); /* This is the end of a real transaction or autocommit statement, we @@ -268,7 +234,7 @@ int Trans_delegate::after_rollback(THD *thd, bool all) param.log_pos= log_info ? 
log_info->log_pos : 0; int ret= 0; - FOREACH_OBSERVER(ret, after_rollback, thd, (¶m)); + FOREACH_OBSERVER(ret, after_rollback, false, (¶m)); /* This is the end of a real transaction or autocommit statement, we @@ -307,7 +273,7 @@ int Binlog_storage_delegate::after_flush(THD *thd, log_info->log_pos = log_pos; int ret= 0; - FOREACH_OBSERVER(ret, after_flush, thd, + FOREACH_OBSERVER(ret, after_flush, false, (¶m, log_info->log_file, log_info->log_pos, flags)); return ret; } @@ -321,7 +287,7 @@ int Binlog_transmit_delegate::transmit_start(THD *thd, ushort flags, param.flags= flags; int ret= 0; - FOREACH_OBSERVER(ret, transmit_start, thd, (¶m, log_file, log_pos)); + FOREACH_OBSERVER(ret, transmit_start, true, (¶m, log_file, log_pos)); return ret; } @@ -331,7 +297,7 @@ int Binlog_transmit_delegate::transmit_stop(THD *thd, ushort flags) param.flags= flags; int ret= 0; - FOREACH_OBSERVER(ret, transmit_stop, thd, (¶m)); + FOREACH_OBSERVER(ret, transmit_stop, false, (¶m)); return ret; } @@ -356,13 +322,6 @@ int Binlog_transmit_delegate::reserve_header(THD *thd, ushort flags, Observer_info *info= iter++; for (; info; info= iter++) { - plugin_ref plugin= - my_plugin_lock(thd, info->plugin); - if (!plugin) - { - ret= 1; - break; - } hlen= 0; if (((Observer *)info->observer)->reserve_header && ((Observer *)info->observer)->reserve_header(¶m, @@ -371,10 +330,8 @@ int Binlog_transmit_delegate::reserve_header(THD *thd, ushort flags, &hlen)) { ret= 1; - plugin_unlock(thd, plugin); break; } - plugin_unlock(thd, plugin); if (hlen == 0) continue; if (hlen > RESERVE_HEADER_SIZE || packet->append((char *)header, hlen)) @@ -396,7 +353,7 @@ int Binlog_transmit_delegate::before_send_event(THD *thd, ushort flags, param.flags= flags; int ret= 0; - FOREACH_OBSERVER(ret, before_send_event, thd, + FOREACH_OBSERVER(ret, before_send_event, false, (¶m, (uchar *)packet->c_ptr(), packet->length(), log_file+dirname_length(log_file), log_pos)); @@ -410,7 +367,7 @@ int 
Binlog_transmit_delegate::after_send_event(THD *thd, ushort flags, param.flags= flags; int ret= 0; - FOREACH_OBSERVER(ret, after_send_event, thd, + FOREACH_OBSERVER(ret, after_send_event, false, (¶m, packet->c_ptr(), packet->length())); return ret; } @@ -422,7 +379,7 @@ int Binlog_transmit_delegate::after_reset_master(THD *thd, ushort flags) param.flags= flags; int ret= 0; - FOREACH_OBSERVER(ret, after_reset_master, thd, (¶m)); + FOREACH_OBSERVER(ret, after_reset_master, false, (¶m)); return ret; } @@ -443,7 +400,7 @@ int Binlog_relay_IO_delegate::thread_start(THD *thd, Master_info *mi) init_param(¶m, mi); int ret= 0; - FOREACH_OBSERVER(ret, thread_start, thd, (¶m)); + FOREACH_OBSERVER(ret, thread_start, true, (¶m)); return ret; } @@ -455,7 +412,7 @@ int Binlog_relay_IO_delegate::thread_stop(THD *thd, Master_info *mi) init_param(¶m, mi); int ret= 0; - FOREACH_OBSERVER(ret, thread_stop, thd, (¶m)); + FOREACH_OBSERVER(ret, thread_stop, false, (¶m)); return ret; } @@ -467,7 +424,7 @@ int Binlog_relay_IO_delegate::before_request_transmit(THD *thd, init_param(¶m, mi); int ret= 0; - FOREACH_OBSERVER(ret, before_request_transmit, thd, (¶m, (uint32)flags)); + FOREACH_OBSERVER(ret, before_request_transmit, false, (¶m, (uint32)flags)); return ret; } @@ -480,7 +437,7 @@ int Binlog_relay_IO_delegate::after_read_event(THD *thd, Master_info *mi, init_param(¶m, mi); int ret= 0; - FOREACH_OBSERVER(ret, after_read_event, thd, + FOREACH_OBSERVER(ret, after_read_event, false, (¶m, packet, len, event_buf, event_len)); return ret; } @@ -498,7 +455,7 @@ int Binlog_relay_IO_delegate::after_queue_event(THD *thd, Master_info *mi, flags |= BINLOG_STORAGE_IS_SYNCED; int ret= 0; - FOREACH_OBSERVER(ret, after_queue_event, thd, + FOREACH_OBSERVER(ret, after_queue_event, false, (¶m, event_buf, event_len, flags)); return ret; } @@ -510,7 +467,7 @@ int Binlog_relay_IO_delegate::after_reset_slave(THD *thd, Master_info *mi) init_param(¶m, mi); int ret= 0; - FOREACH_OBSERVER(ret, after_reset_slave, 
thd, (¶m)); + FOREACH_OBSERVER(ret, after_reset_slave, false, (¶m)); return ret; } #endif /* HAVE_REPLICATION */ diff --git a/sql/rpl_handler.h b/sql/rpl_handler.h index e028fb49808..e262ebdbd6b 100644 --- a/sql/rpl_handler.h +++ b/sql/rpl_handler.h @@ -26,13 +26,10 @@ class Observer_info { public: void *observer; st_plugin_int *plugin_int; - plugin_ref plugin; Observer_info(void *ob, st_plugin_int *p) :observer(ob), plugin_int(p) - { - plugin= plugin_int_to_ref(plugin_int); - } + { } }; class Delegate { diff --git a/sql/rpl_parallel.cc b/sql/rpl_parallel.cc index e72d3470a7f..0d23248539c 100644 --- a/sql/rpl_parallel.cc +++ b/sql/rpl_parallel.cc @@ -7,15 +7,6 @@ /* Code for optional parallel execution of replicated events on the slave. - - ToDo list: - - - Retry of failed transactions is not yet implemented for the parallel case. - - - All the waits (eg. in struct wait_for_commit and in - rpl_parallel_thread_pool::get_thread()) need to be killable. And on kill, - everything needs to be correctly rolled back and stopped in all threads, - to ensure a consistent slave replication state. */ struct rpl_parallel_thread_pool global_rpl_thread_pool; @@ -32,7 +23,6 @@ rpt_handle_event(rpl_parallel_thread::queued_event *qev, Relay_log_info *rli= rgi->rli; THD *thd= rgi->thd; - thd->rgi_slave= rgi; thd->system_thread_info.rpl_sql_info->rpl_filter = rli->mi->rpl_filter; /* ToDo: Access to thd, and what about rli, split out a parallel part? 
*/ @@ -44,7 +34,6 @@ rpt_handle_event(rpl_parallel_thread::queued_event *qev, rgi->future_event_relay_log_pos= qev->future_event_relay_log_pos; strcpy(rgi->future_event_master_log_name, qev->future_event_master_log_name); err= apply_event_and_update_pos(qev->ev, thd, rgi, rpt); - thd->rgi_slave= NULL; thread_safe_increment64(&rli->executed_entries, &slave_executed_entries_lock); @@ -165,6 +154,7 @@ finish_event_group(THD *thd, uint64 sub_id, rpl_parallel_entry *entry, mysql_mutex_unlock(&entry->LOCK_parallel_entry); thd->clear_error(); + thd->reset_killed(); thd->get_stmt_da()->reset_diagnostics_area(); wfc->wakeup_subsequent_commits(rgi->worker_error); } @@ -197,6 +187,281 @@ unlock_or_exit_cond(THD *thd, mysql_mutex_t *lock, bool *did_enter_cond, } +static void +register_wait_for_prior_event_group_commit(rpl_group_info *rgi, + rpl_parallel_entry *entry) +{ + mysql_mutex_assert_owner(&entry->LOCK_parallel_entry); + if (rgi->wait_commit_sub_id > entry->last_committed_sub_id) + { + /* + Register that the commit of this event group must wait for the + commit of the previous event group to complete before it may + complete itself, so that we preserve commit order. + */ + wait_for_commit *waitee= + &rgi->wait_commit_group_info->commit_orderer; + rgi->commit_orderer.register_wait_for_prior_commit(waitee); + } +} + + +#ifndef DBUG_OFF +static int +dbug_simulate_tmp_error(rpl_group_info *rgi, THD *thd) +{ + if (rgi->current_gtid.domain_id == 0 && rgi->current_gtid.seq_no == 100 && + rgi->retry_event_count == 4) + { + thd->clear_error(); + thd->get_stmt_da()->reset_diagnostics_area(); + my_error(ER_LOCK_DEADLOCK, MYF(0)); + return 1; + } + return 0; +} +#endif + + +/* + If we detect a deadlock due to eg. storage engine locks that conflict with + the fixed commit order, then the later transaction will be killed + asynchroneously to allow the former to complete its commit. 
+ + In this case, we convert the 'killed' error into a deadlock error, and retry + the later transaction. */ +static void +convert_kill_to_deadlock_error(rpl_group_info *rgi) +{ + THD *thd= rgi->thd; + int err_code; + + if (!thd->get_stmt_da()->is_error()) + return; + err_code= thd->get_stmt_da()->sql_errno(); + if ((err_code == ER_QUERY_INTERRUPTED || err_code == ER_CONNECTION_KILLED) && + rgi->killed_for_retry) + { + thd->clear_error(); + my_error(ER_LOCK_DEADLOCK, MYF(0)); + rgi->killed_for_retry= false; + thd->reset_killed(); + } +} + + +static bool +is_group_ending(Log_event *ev, Log_event_type event_type) +{ + return event_type == XID_EVENT || + (event_type == QUERY_EVENT && + (((Query_log_event *)ev)->is_commit() || + ((Query_log_event *)ev)->is_rollback())); +} + + +static int +retry_event_group(rpl_group_info *rgi, rpl_parallel_thread *rpt, + rpl_parallel_thread::queued_event *orig_qev) +{ + IO_CACHE rlog; + LOG_INFO linfo; + File fd= (File)-1; + const char *errmsg= NULL; + inuse_relaylog *ir= rgi->relay_log; + uint64 event_count; + uint64 events_to_execute= rgi->retry_event_count; + Relay_log_info *rli= rgi->rli; + int err; + ulonglong cur_offset, old_offset; + char log_name[FN_REFLEN]; + THD *thd= rgi->thd; + rpl_parallel_entry *entry= rgi->parallel_entry; + ulong retries= 0; + +do_retry: + event_count= 0; + err= 0; + + /* + If we already started committing before getting the deadlock (or other + error) that caused us to need to retry, we have already signalled + subsequent transactions that we have started committing. This is + potentially a problem, as now we will rollback, and if subsequent + transactions would start to execute now, they could see an unexpected + state of the database and get eg. key not found or duplicate key error. + + However, to get a deadlock in the first place, there must have been + another earlier transaction that is waiting for us. 
Thus that other + transaction has _not_ yet started to commit, and any subsequent + transactions will still be waiting at this point. + + So here, we decrement back the count of transactions that started + committing (if we already incremented it), undoing the effect of an + earlier mark_start_commit(). Then later, when the retry succeeds and we + commit again, we can do a new mark_start_commit() and eventually wake up + subsequent transactions at the proper time. + + We need to do the unmark before the rollback, to be sure that the + transaction we deadlocked with will not signal that it started to commit + until after the unmark. + */ + rgi->unmark_start_commit(); + + /* + We might get the deadlock error that causes the retry during commit, while + sitting in wait_for_prior_commit(). If this happens, we will have a + pending error in the wait_for_commit object. So clear this by + unregistering (and later re-registering) the wait. + */ + if(thd->wait_for_commit_ptr) + thd->wait_for_commit_ptr->unregister_wait_for_prior_commit(); + rgi->cleanup_context(thd, 1); + + mysql_mutex_lock(&rli->data_lock); + ++rli->retried_trans; + statistic_increment(slave_retried_transactions, LOCK_status); + mysql_mutex_unlock(&rli->data_lock); + + mysql_mutex_lock(&entry->LOCK_parallel_entry); + register_wait_for_prior_event_group_commit(rgi, entry); + mysql_mutex_unlock(&entry->LOCK_parallel_entry); + + strmake_buf(log_name, ir->name); + if ((fd= open_binlog(&rlog, log_name, &errmsg)) <0) + { + err= 1; + goto err; + } + cur_offset= rgi->retry_start_offset; + my_b_seek(&rlog, cur_offset); + + do + { + Log_event_type event_type; + Log_event *ev; + rpl_parallel_thread::queued_event *qev; + + /* The loop is here so we can try again the next relay log file on EOF. 
*/ + for (;;) + { + old_offset= cur_offset; + ev= Log_event::read_log_event(&rlog, 0, + rli->relay_log.description_event_for_exec /* ToDo: this needs fixing */, + opt_slave_sql_verify_checksum); + cur_offset= my_b_tell(&rlog); + + if (ev) + break; + if (rlog.error < 0) + { + errmsg= "slave SQL thread aborted because of I/O error"; + err= 1; + goto err; + } + if (rlog.error > 0) + { + sql_print_error("Slave SQL thread: I/O error reading " + "event(errno: %d cur_log->error: %d)", + my_errno, rlog.error); + errmsg= "Aborting slave SQL thread because of partial event read"; + err= 1; + goto err; + } + /* EOF. Move to the next relay log. */ + end_io_cache(&rlog); + mysql_file_close(fd, MYF(MY_WME)); + fd= (File)-1; + + /* Find the next relay log file. */ + if((err= rli->relay_log.find_log_pos(&linfo, log_name, 1)) || + (err= rli->relay_log.find_next_log(&linfo, 1))) + { + char buff[22]; + sql_print_error("next log error: %d offset: %s log: %s", + err, + llstr(linfo.index_file_offset, buff), + log_name); + goto err; + } + strmake_buf(log_name ,linfo.log_file_name); + + if ((fd= open_binlog(&rlog, log_name, &errmsg)) <0) + { + err= 1; + goto err; + } + /* Loop to try again on the new log file. 
*/ + } + + event_type= ev->get_type_code(); + if (!Log_event::is_group_event(event_type)) + { + delete ev; + continue; + } + ev->thd= thd; + + mysql_mutex_lock(&rpt->LOCK_rpl_thread); + qev= rpt->retry_get_qev(ev, orig_qev, log_name, cur_offset, + cur_offset - old_offset); + mysql_mutex_unlock(&rpt->LOCK_rpl_thread); + if (!qev) + { + delete ev; + my_error(ER_OUT_OF_RESOURCES, MYF(0)); + err= 1; + goto err; + } + if (is_group_ending(ev, event_type)) + rgi->mark_start_commit(); + + err= rpt_handle_event(qev, rpt); + ++event_count; + mysql_mutex_lock(&rpt->LOCK_rpl_thread); + rpt->free_qev(qev); + mysql_mutex_unlock(&rpt->LOCK_rpl_thread); + + delete_or_keep_event_post_apply(rgi, event_type, ev); + DBUG_EXECUTE_IF("rpl_parallel_simulate_double_temp_err_gtid_0_x_100", + if (retries == 0) err= dbug_simulate_tmp_error(rgi, thd);); + DBUG_EXECUTE_IF("rpl_parallel_simulate_infinite_temp_err_gtid_0_x_100", + err= dbug_simulate_tmp_error(rgi, thd);); + if (err) + { + convert_kill_to_deadlock_error(rgi); + if (has_temporary_error(thd)) + { + ++retries; + if (retries < slave_trans_retries) + { + end_io_cache(&rlog); + mysql_file_close(fd, MYF(MY_WME)); + fd= (File)-1; + goto do_retry; + } + sql_print_error("Slave worker thread retried transaction %lu time(s) " + "in vain, giving up. 
Consider raising the value of " + "the slave_transaction_retries variable.", + slave_trans_retries); + } + goto err; + } + } while (event_count < events_to_execute); + +err: + + if (fd >= 0) + { + end_io_cache(&rlog); + mysql_file_close(fd, MYF(MY_WME)); + } + if (errmsg) + sql_print_error("Error reading relay log event: %s", errmsg); + return err; +} + + pthread_handler_t handle_rpl_parallel_thread(void *arg) { @@ -215,6 +480,8 @@ handle_rpl_parallel_thread(void *arg) rpl_sql_thread_info sql_info(NULL); size_t total_event_size; int err; + inuse_relaylog *last_ir; + uint64 accumulated_ir_count; struct rpl_parallel_thread *rpt= (struct rpl_parallel_thread *)arg; @@ -244,39 +511,6 @@ handle_rpl_parallel_thread(void *arg) thd->set_time(); thd->variables.lock_wait_timeout= LONG_TIMEOUT; thd->system_thread_info.rpl_sql_info= &sql_info; - /* - For now, we need to run the replication parallel worker threads in - READ COMMITTED. This is needed because gap locks are not symmetric. - For example, a gap lock from a DELETE blocks an insert intention lock, - but not vice versa. So an INSERT followed by DELETE can group commit - on the master, but if we are unlucky with thread scheduling we can - then deadlock on the slave because the INSERT ends up waiting for a - gap lock from the DELETE (and the DELETE in turn waits for the INSERT - in wait_for_prior_commit()). See also MDEV-5914. - - It should be mostly safe to run in READ COMMITTED in the slave anyway. - The commit order is already fixed from on the master, so we do not - risk logging into the binlog in an incorrect order between worker - threads (one that would cause different results if executed on a - lower-level slave that uses this slave as a master). The only - potential problem is with transactions run in a different master - connection (using multi-source replication), or run directly on the - slave by an application; when using READ COMMITTED we are not - guaranteed serialisability of binlogged statements. 
- - In practice, this is unlikely to be an issue. In GTID mode, such - parallel transactions from multi-source or application must in any - case use a different replication domain, in which case binlog order - by definition must be independent between the different domain. Even - in non-GTID mode, normally one will assume that the external - transactions are not conflicting with those applied by the slave, so - that isolation level should make no difference. It would be rather - strange if the result of applying query events from one master would - depend on the timing and nature of other queries executed from - different multi-source connections or done directly on the slave by - an application. Still, something to be aware of. - */ - thd->variables.tx_isolation= ISO_READ_COMMITTED; mysql_mutex_lock(&rpt->LOCK_rpl_thread); rpt->thd= thd; @@ -332,7 +566,7 @@ handle_rpl_parallel_thread(void *arg) continue; } - group_rgi= rgi; + thd->rgi_slave= group_rgi= rgi; gco= rgi->gco; /* Handle a new event group, which will be initiated by a GTID event. */ if ((event_type= events->ev->get_type_code()) == GTID_EVENT) @@ -341,7 +575,6 @@ handle_rpl_parallel_thread(void *arg) PSI_stage_info old_stage; uint64 wait_count; - thd->tx_isolation= (enum_tx_isolation)thd->variables.tx_isolation; in_event_group= true; /* If the standalone flag is set, then this event group consists of a @@ -352,9 +585,7 @@ handle_rpl_parallel_thread(void *arg) (0 != (static_cast<Gtid_log_event *>(events->ev)->flags2 & Gtid_log_event::FL_STANDALONE)); - /* Save this, as it gets cleared when the event group commits. 
*/ event_gtid_sub_id= rgi->gtid_sub_id; - rgi->thd= thd; /* @@ -388,7 +619,7 @@ handle_rpl_parallel_thread(void *arg) { DEBUG_SYNC(thd, "rpl_parallel_start_waiting_for_prior_killed"); thd->send_kill_message(); - slave_output_error_info(rgi->rli, thd); + slave_output_error_info(rgi, thd); signal_error_to_sql_driver_thread(thd, rgi, 1); /* Even though we were killed, we need to continue waiting for the @@ -430,17 +661,9 @@ handle_rpl_parallel_thread(void *arg) if (unlikely(entry->stop_on_error_sub_id <= rgi->wait_commit_sub_id)) skip_event_group= true; - else if (rgi->wait_commit_sub_id > entry->last_committed_sub_id) - { - /* - Register that the commit of this event group must wait for the - commit of the previous event group to complete before it may - complete itself, so that we preserve commit order. - */ - wait_for_commit *waitee= - &rgi->wait_commit_group_info->commit_orderer; - rgi->commit_orderer.register_wait_for_prior_commit(waitee); - } + else + register_wait_for_prior_event_group_commit(rgi, entry); + unlock_or_exit_cond(thd, &entry->LOCK_parallel_entry, &did_enter_cond, &old_stage); @@ -467,7 +690,7 @@ handle_rpl_parallel_thread(void *arg) if (res < 0) { /* Error. */ - slave_output_error_info(rgi->rli, thd); + slave_output_error_info(rgi, thd); signal_error_to_sql_driver_thread(thd, rgi, 1); } else if (!res) @@ -482,11 +705,8 @@ handle_rpl_parallel_thread(void *arg) } } - group_ending= event_type == XID_EVENT || - (event_type == QUERY_EVENT && - (((Query_log_event *)events->ev)->is_commit() || - ((Query_log_event *)events->ev)->is_rollback())); - if (group_ending) + group_ending= is_group_ending(events->ev, event_type); + if (group_ending && likely(!rgi->worker_error)) { DEBUG_SYNC(thd, "rpl_parallel_before_mark_start_commit"); rgi->mark_start_commit(); @@ -498,24 +718,42 @@ handle_rpl_parallel_thread(void *arg) processing between the event groups as a simple way to ensure that everything is stopped and cleaned up correctly. 
*/ - if (!rgi->worker_error && !skip_event_group) + if (likely(!rgi->worker_error) && !skip_event_group) + { + ++rgi->retry_event_count; err= rpt_handle_event(events, rpt); + delete_or_keep_event_post_apply(rgi, event_type, events->ev); + DBUG_EXECUTE_IF("rpl_parallel_simulate_temp_err_gtid_0_x_100", + err= dbug_simulate_tmp_error(rgi, thd);); + if (err) + { + convert_kill_to_deadlock_error(rgi); + if (has_temporary_error(thd) && slave_trans_retries > 0) + err= retry_event_group(rgi, rpt, events); + } + } else + { + delete events->ev; err= thd->wait_for_prior_commit(); + } end_of_group= in_event_group && ((group_standalone && !Log_event::is_part_of_group(event_type)) || group_ending); - delete_or_keep_event_post_apply(rgi, event_type, events->ev); events->next= qevs_to_free; qevs_to_free= events; - if (unlikely(err) && !rgi->worker_error) + if (unlikely(err)) { - slave_output_error_info(rgi->rli, thd); - signal_error_to_sql_driver_thread(thd, rgi, err); + if (!rgi->worker_error) + { + slave_output_error_info(rgi, thd); + signal_error_to_sql_driver_thread(thd, rgi, err); + } + thd->reset_killed(); } if (end_of_group) { @@ -523,7 +761,7 @@ handle_rpl_parallel_thread(void *arg) finish_event_group(thd, event_gtid_sub_id, entry, rgi); rgi->next= rgis_to_free; rgis_to_free= rgi; - group_rgi= rgi= NULL; + thd->rgi_slave= group_rgi= rgi= NULL; skip_event_group= false; DEBUG_SYNC(thd, "rpl_parallel_end_of_group"); } @@ -548,12 +786,34 @@ handle_rpl_parallel_thread(void *arg) rpt->free_rgi(rgis_to_free); rgis_to_free= next; } + last_ir= NULL; + accumulated_ir_count= 0; while (qevs_to_free) { rpl_parallel_thread::queued_event *next= qevs_to_free->next; + inuse_relaylog *ir= qevs_to_free->ir; + /* Batch up refcount update to reduce use of synchronised operations. 
*/ + if (last_ir != ir) + { + if (last_ir) + { + my_atomic_rwlock_wrlock(&rli->inuse_relaylog_atomic_lock); + my_atomic_add64(&last_ir->dequeued_count, accumulated_ir_count); + my_atomic_rwlock_wrunlock(&rli->inuse_relaylog_atomic_lock); + accumulated_ir_count= 0; + } + last_ir= ir; + } + ++accumulated_ir_count; rpt->free_qev(qevs_to_free); qevs_to_free= next; } + if (last_ir) + { + my_atomic_rwlock_wrlock(&rli->inuse_relaylog_atomic_lock); + my_atomic_add64(&last_ir->dequeued_count, accumulated_ir_count); + my_atomic_rwlock_wrunlock(&rli->inuse_relaylog_atomic_lock); + } if ((events= rpt->event_queue) != NULL) { @@ -584,7 +844,7 @@ handle_rpl_parallel_thread(void *arg) in_event_group= false; mysql_mutex_lock(&rpt->LOCK_rpl_thread); rpt->free_rgi(group_rgi); - group_rgi= NULL; + thd->rgi_slave= group_rgi= NULL; skip_event_group= false; } if (!in_event_group) @@ -802,8 +1062,7 @@ err: rpl_parallel_thread::queued_event * -rpl_parallel_thread::get_qev(Log_event *ev, ulonglong event_size, - Relay_log_info *rli) +rpl_parallel_thread::get_qev_common(Log_event *ev, ulonglong event_size) { queued_event *qev; mysql_mutex_assert_owner(&LOCK_rpl_thread); @@ -817,6 +1076,17 @@ rpl_parallel_thread::get_qev(Log_event *ev, ulonglong event_size, qev->ev= ev; qev->event_size= event_size; qev->next= NULL; + return qev; +} + + +rpl_parallel_thread::queued_event * +rpl_parallel_thread::get_qev(Log_event *ev, ulonglong event_size, + Relay_log_info *rli) +{ + queued_event *qev= get_qev_common(ev, event_size); + if (!qev) + return NULL; strcpy(qev->event_relay_log_name, rli->event_relay_log_name); qev->event_relay_log_pos= rli->event_relay_log_pos; qev->future_event_relay_log_pos= rli->future_event_relay_log_pos; @@ -825,6 +1095,24 @@ rpl_parallel_thread::get_qev(Log_event *ev, ulonglong event_size, } +rpl_parallel_thread::queued_event * +rpl_parallel_thread::retry_get_qev(Log_event *ev, queued_event *orig_qev, + const char *relay_log_name, + ulonglong event_pos, ulonglong event_size) +{ 
+ queued_event *qev= get_qev_common(ev, event_size); + if (!qev) + return NULL; + qev->rgi= orig_qev->rgi; + strcpy(qev->event_relay_log_name, relay_log_name); + qev->event_relay_log_pos= event_pos; + qev->future_event_relay_log_pos= event_pos+event_size; + strcpy(qev->future_event_master_log_name, + orig_qev->future_event_master_log_name); + return qev; +} + + void rpl_parallel_thread::free_qev(rpl_parallel_thread::queued_event *qev) { @@ -836,7 +1124,7 @@ rpl_parallel_thread::free_qev(rpl_parallel_thread::queued_event *qev) rpl_group_info* rpl_parallel_thread::get_rgi(Relay_log_info *rli, Gtid_log_event *gtid_ev, - rpl_parallel_entry *e) + rpl_parallel_entry *e, ulonglong event_size) { rpl_group_info *rgi; mysql_mutex_assert_owner(&LOCK_rpl_thread); @@ -864,6 +1152,10 @@ rpl_parallel_thread::get_rgi(Relay_log_info *rli, Gtid_log_event *gtid_ev, return NULL; } rgi->parallel_entry= e; + rgi->relay_log= rli->last_inuse_relaylog; + rgi->retry_start_offset= rli->future_event_relay_log_pos-event_size; + rgi->retry_event_count= 0; + rgi->killed_for_retry= false; return rgi; } @@ -1018,10 +1310,11 @@ rpl_parallel_thread_pool::release_thread(rpl_parallel_thread *rpt) if it is still available. Otherwise a new worker thread is allocated. 
*/ rpl_parallel_thread * -rpl_parallel_entry::choose_thread(Relay_log_info *rli, bool *did_enter_cond, +rpl_parallel_entry::choose_thread(rpl_group_info *rgi, bool *did_enter_cond, PSI_stage_info *old_stage, bool reuse) { uint32 idx; + Relay_log_info *rli= rgi->rli; rpl_parallel_thread *thr; idx= rpl_thread_idx; @@ -1066,7 +1359,7 @@ rpl_parallel_entry::choose_thread(Relay_log_info *rli, bool *did_enter_cond, debug_sync_set_action(rli->sql_driver_thd, STRING_WITH_LEN("now SIGNAL wait_queue_killed")); };); - slave_output_error_info(rli, rli->sql_driver_thd); + slave_output_error_info(rgi, rli->sql_driver_thd); return NULL; } else @@ -1390,15 +1683,9 @@ rpl_parallel::do_event(rpl_group_info *serial_rgi, Log_event *ev, if (typ == GTID_EVENT) { - uint32 domain_id; - if (likely(typ == GTID_EVENT)) - { - Gtid_log_event *gtid_ev= static_cast<Gtid_log_event *>(ev); - domain_id= (rli->mi->using_gtid == Master_info::USE_GTID_NO ? - 0 : gtid_ev->domain_id); - } - else - domain_id= 0; + Gtid_log_event *gtid_ev= static_cast<Gtid_log_event *>(ev); + uint32 domain_id= (rli->mi->using_gtid == Master_info::USE_GTID_NO ? + 0 : gtid_ev->domain_id); if (!(e= find(domain_id))) { my_error(ER_OUT_OF_RESOURCES, MYF(MY_WME)); @@ -1417,7 +1704,8 @@ rpl_parallel::do_event(rpl_group_info *serial_rgi, Log_event *ev, instead re-use a thread that we queued for previously. */ cur_thread= - e->choose_thread(rli, &did_enter_cond, &old_stage, typ != GTID_EVENT); + e->choose_thread(serial_rgi, &did_enter_cond, &old_stage, + typ != GTID_EVENT); if (!cur_thread) { /* This means we were killed. The error is already signalled. 
*/ @@ -1437,7 +1725,7 @@ rpl_parallel::do_event(rpl_group_info *serial_rgi, Log_event *ev, { Gtid_log_event *gtid_ev= static_cast<Gtid_log_event *>(ev); - if (!(rgi= cur_thread->get_rgi(rli, gtid_ev, e))) + if (!(rgi= cur_thread->get_rgi(rli, gtid_ev, e, event_size))) { cur_thread->free_qev(qev); abandon_worker_thread(rli->sql_driver_thd, cur_thread, @@ -1549,6 +1837,8 @@ rpl_parallel::do_event(rpl_group_info *serial_rgi, Log_event *ev, Queue the event for processing. */ rli->event_relay_log_pos= rli->future_event_relay_log_pos; + qev->ir= rli->last_inuse_relaylog; + ++qev->ir->queued_count; cur_thread->enqueue(qev); unlock_or_exit_cond(rli->sql_driver_thd, &cur_thread->LOCK_rpl_thread, &did_enter_cond, &old_stage); diff --git a/sql/rpl_parallel.h b/sql/rpl_parallel.h index c4bb407e5eb..415259cd3c4 100644 --- a/sql/rpl_parallel.h +++ b/sql/rpl_parallel.h @@ -9,6 +9,7 @@ struct rpl_parallel_entry; struct rpl_parallel_thread_pool; class Relay_log_info; +struct inuse_relaylog; /* @@ -73,6 +74,7 @@ struct rpl_parallel_thread { queued_event *next; Log_event *ev; rpl_group_info *rgi; + inuse_relaylog *ir; ulonglong future_event_relay_log_pos; char event_relay_log_name[FN_REFLEN]; char future_event_master_log_name[FN_REFLEN]; @@ -106,11 +108,15 @@ struct rpl_parallel_thread { queued_size-= dequeue_size; } + queued_event *get_qev_common(Log_event *ev, ulonglong event_size); queued_event *get_qev(Log_event *ev, ulonglong event_size, Relay_log_info *rli); + queued_event *retry_get_qev(Log_event *ev, queued_event *orig_qev, + const char *relay_log_name, + ulonglong event_pos, ulonglong event_size); void free_qev(queued_event *qev); rpl_group_info *get_rgi(Relay_log_info *rli, Gtid_log_event *gtid_ev, - rpl_parallel_entry *e); + rpl_parallel_entry *e, ulonglong event_size); void free_rgi(rpl_group_info *rgi); group_commit_orderer *get_gco(uint64 wait_count, group_commit_orderer *prev); void free_gco(group_commit_orderer *gco); @@ -176,7 +182,7 @@ struct rpl_parallel_entry { 
Event groups commit in order, so the rpl_group_info for an event group will be alive (at least) as long as - rpl_grou_info::gtid_sub_id > last_committed_sub_id. This can be used to + rpl_group_info::gtid_sub_id > last_committed_sub_id. This can be used to safely refer back to previous event groups if they are still executing, and ignore them if they completed, without requiring explicit synchronisation between the threads. @@ -208,7 +214,7 @@ struct rpl_parallel_entry { /* The group_commit_orderer object for the events currently being queued. */ group_commit_orderer *current_gco; - rpl_parallel_thread * choose_thread(Relay_log_info *rli, bool *did_enter_cond, + rpl_parallel_thread * choose_thread(rpl_group_info *rgi, bool *did_enter_cond, PSI_stage_info *old_stage, bool reuse); group_commit_orderer *get_gco(); void free_gco(group_commit_orderer *gco); diff --git a/sql/rpl_record.cc b/sql/rpl_record.cc index b82dbb07a42..991c918ea7f 100644 --- a/sql/rpl_record.cc +++ b/sql/rpl_record.cc @@ -335,6 +335,7 @@ unpack_row(rpl_group_info *rgi, #endif /* WITH_WSREP */ rgi->rli->report(ERROR_LEVEL, ER_SLAVE_CORRUPT_EVENT, + rgi->gtid_info(), "Could not read field '%s' of table '%s.%s'", f->field_name, table->s->db.str, table->s->table_name.str); diff --git a/sql/rpl_record_old.cc b/sql/rpl_record_old.cc index 5afa529a63c..8b43b268c17 100644 --- a/sql/rpl_record_old.cc +++ b/sql/rpl_record_old.cc @@ -141,7 +141,7 @@ unpack_row_old(rpl_group_info *rgi, f->move_field_offset(-offset); if (!ptr) { - rgi->rli->report(ERROR_LEVEL, ER_SLAVE_CORRUPT_EVENT, + rgi->rli->report(ERROR_LEVEL, ER_SLAVE_CORRUPT_EVENT, NULL, "Could not read field `%s` of table `%s`.`%s`", f->field_name, table->s->db.str, table->s->table_name.str); @@ -183,7 +183,7 @@ unpack_row_old(rpl_group_info *rgi, if (event_type == WRITE_ROWS_EVENT && ((*field_ptr)->flags & mask) == mask) { - rgi->rli->report(ERROR_LEVEL, ER_NO_DEFAULT_FOR_FIELD, + rgi->rli->report(ERROR_LEVEL, ER_NO_DEFAULT_FOR_FIELD, NULL, "Field 
`%s` of table `%s`.`%s` " "has no default value and cannot be NULL", (*field_ptr)->field_name, table->s->db.str, diff --git a/sql/rpl_reporting.cc b/sql/rpl_reporting.cc index 96fe6242ac3..eb362941f3e 100644 --- a/sql/rpl_reporting.cc +++ b/sql/rpl_reporting.cc @@ -28,6 +28,7 @@ Slave_reporting_capability::Slave_reporting_capability(char const *thread_name) void Slave_reporting_capability::report(loglevel level, int err_code, + const char *extra_info, const char *msg, ...) const { void (*report_function)(const char *, ...); @@ -67,9 +68,10 @@ Slave_reporting_capability::report(loglevel level, int err_code, va_end(args); /* If the msg string ends with '.', do not add a ',' it would be ugly */ - report_function("Slave %s: %s%s Internal MariaDB error code: %d", + report_function("Slave %s: %s%s %s%sInternal MariaDB error code: %d", m_thread_name, pbuff, (pbuff[0] && *(strend(pbuff)-1) == '.') ? "" : ",", + (extra_info ? extra_info : ""), (extra_info ? ", " : ""), err_code); } diff --git a/sql/rpl_reporting.h b/sql/rpl_reporting.h index 2b5e0527b9b..d90b7ad6650 100644 --- a/sql/rpl_reporting.h +++ b/sql/rpl_reporting.h @@ -52,8 +52,9 @@ public: code, but can contain more information), in printf() format. */ - void report(loglevel level, int err_code, const char *msg, ...) const - ATTRIBUTE_FORMAT(printf, 4, 5); + void report(loglevel level, int err_code, const char *extra_info, + const char *msg, ...) const + ATTRIBUTE_FORMAT(printf, 5, 6); /** Clear errors. 
They will not show up under <code>SHOW SLAVE diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc index a162d1d79f8..0b133555cea 100644 --- a/sql/rpl_rli.cc +++ b/sql/rpl_rli.cc @@ -52,6 +52,7 @@ Relay_log_info::Relay_log_info(bool is_slave_recovery) info_fd(-1), cur_log_fd(-1), relay_log(&sync_relaylog_period), sync_counter(0), is_relay_log_recovery(is_slave_recovery), save_temporary_tables(0), mi(0), + inuse_relaylog_list(0), last_inuse_relaylog(0), cur_log_old_open_count(0), group_relay_log_pos(0), event_relay_log_pos(0), #if HAVE_valgrind @@ -91,6 +92,7 @@ Relay_log_info::Relay_log_info(bool is_slave_recovery) mysql_cond_init(key_relay_log_info_start_cond, &start_cond, NULL); mysql_cond_init(key_relay_log_info_stop_cond, &stop_cond, NULL); mysql_cond_init(key_relay_log_info_log_space_cond, &log_space_cond, NULL); + my_atomic_rwlock_init(&inuse_relaylog_atomic_lock); relay_log.init_pthread_objects(); DBUG_VOID_RETURN; } @@ -98,8 +100,17 @@ Relay_log_info::Relay_log_info(bool is_slave_recovery) Relay_log_info::~Relay_log_info() { + inuse_relaylog *cur; DBUG_ENTER("Relay_log_info::~Relay_log_info"); + cur= inuse_relaylog_list; + while (cur) + { + DBUG_ASSERT(cur->queued_count == cur->dequeued_count); + inuse_relaylog *next= cur->next; + my_free(cur); + cur= next; + } mysql_mutex_destroy(&run_lock); mysql_mutex_destroy(&data_lock); mysql_mutex_destroy(&log_space_lock); @@ -107,6 +118,7 @@ Relay_log_info::~Relay_log_info() mysql_cond_destroy(&start_cond); mysql_cond_destroy(&stop_cond); mysql_cond_destroy(&log_space_cond); + my_atomic_rwlock_destroy(&inuse_relaylog_atomic_lock); relay_log.cleanup(); DBUG_VOID_RETURN; } @@ -305,20 +317,80 @@ Failed to open the existing relay log info file '%s' (errno %d)", } rli->info_fd = info_fd; - int relay_log_pos, master_log_pos; + int relay_log_pos, master_log_pos, lines; + char *first_non_digit; + /* + In MySQL 5.6, there is a MASTER_DELAY option to CHANGE MASTER. This is + not yet merged into MariaDB (as of 10.0.13). 
However, we detect the + presense of the new option in relay-log.info, as a placeholder for + possible later merge of the feature, and to maintain file format + compatibility with MySQL 5.6+. + */ + int dummy_sql_delay; + + /* + Starting from MySQL 5.6.x, relay-log.info has a new format. + Now, its first line contains the number of lines in the file. + By reading this number we can determine which version our master.info + comes from. We can't simply count the lines in the file, since + versions before 5.6.x could generate files with more lines than + needed. If first line doesn't contain a number, or if it + contains a number less than LINES_IN_RELAY_LOG_INFO_WITH_DELAY, + then the file is treated like a file from pre-5.6.x version. + There is no ambiguity when reading an old master.info: before + 5.6.x, the first line contained the binlog's name, which is + either empty or has an extension (contains a '.'), so can't be + confused with an integer. + + So we're just reading first line and trying to figure which + version is this. + */ + + /* + The first row is temporarily stored in mi->master_log_name, if + it is line count and not binlog name (new format) it will be + overwritten by the second row later. 
+ */ if (init_strvar_from_file(rli->group_relay_log_name, sizeof(rli->group_relay_log_name), + &rli->info_file, "")) + { + msg="Error reading slave log configuration"; + goto err; + } + + lines= strtoul(rli->group_relay_log_name, &first_non_digit, 10); + + if (rli->group_relay_log_name[0] != '\0' && + *first_non_digit == '\0' && + lines >= LINES_IN_RELAY_LOG_INFO_WITH_DELAY) + { + DBUG_PRINT("info", ("relay_log_info file is in new format.")); + /* Seems to be new format => read relay log name from next line */ + if (init_strvar_from_file(rli->group_relay_log_name, + sizeof(rli->group_relay_log_name), + &rli->info_file, "")) + { + msg="Error reading slave log configuration"; + goto err; + } + } + else + DBUG_PRINT("info", ("relay_log_info file is in old format.")); + + if (init_intvar_from_file(&relay_log_pos, + &rli->info_file, BIN_LOG_HEADER_SIZE) || + init_strvar_from_file(rli->group_master_log_name, + sizeof(rli->group_master_log_name), &rli->info_file, "") || - init_intvar_from_file(&relay_log_pos, - &rli->info_file, BIN_LOG_HEADER_SIZE) || - init_strvar_from_file(rli->group_master_log_name, - sizeof(rli->group_master_log_name), - &rli->info_file, "") || - init_intvar_from_file(&master_log_pos, &rli->info_file, 0)) + init_intvar_from_file(&master_log_pos, &rli->info_file, 0) || + (lines >= LINES_IN_RELAY_LOG_INFO_WITH_DELAY && + init_intvar_from_file(&dummy_sql_delay, &rli->info_file, 0))) { msg="Error reading slave log configuration"; goto err; } + strmake_buf(rli->event_relay_log_name,rli->group_relay_log_name); rli->group_relay_log_pos= rli->event_relay_log_pos= relay_log_pos; rli->group_master_log_pos= master_log_pos; @@ -1024,7 +1096,6 @@ int purge_relay_logs(Relay_log_info* rli, THD *thd, bool just_reset, DBUG_ASSERT(rli->slave_running == 0); DBUG_ASSERT(rli->mi->slave_running == 0); - rli->slave_skip_counter=0; mysql_mutex_lock(&rli->data_lock); /* @@ -1243,7 +1314,7 @@ void Relay_log_info::stmt_done(my_off_t event_master_log_pos, 
inc_group_relay_log_pos(event_master_log_pos, rgi); if (rpl_global_gtid_slave_state.record_and_update_gtid(thd, rgi)) { - report(WARNING_LEVEL, ER_CANNOT_UPDATE_GTID_STATE, + report(WARNING_LEVEL, ER_CANNOT_UPDATE_GTID_STATE, rgi->gtid_info(), "Failed to update GTID state in %s.%s, slave state may become " "inconsistent: %d: %s", "mysql", rpl_gtid_slave_state_table_name.str, @@ -1279,6 +1350,32 @@ void Relay_log_info::stmt_done(my_off_t event_master_log_pos, DBUG_VOID_RETURN; } + +int +Relay_log_info::alloc_inuse_relaylog(const char *name) +{ + inuse_relaylog *ir; + + if (!(ir= (inuse_relaylog *)my_malloc(sizeof(*ir), MYF(MY_WME|MY_ZEROFILL)))) + { + my_error(ER_OUTOFMEMORY, MYF(0), (int)sizeof(*ir)); + return 1; + } + strmake_buf(ir->name, name); + + if (!inuse_relaylog_list) + inuse_relaylog_list= ir; + else + { + last_inuse_relaylog->completed= true; + last_inuse_relaylog->next= ir; + } + last_inuse_relaylog= ir; + + return 0; +} + + #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) int rpl_load_gtid_slave_state(THD *thd) @@ -1465,6 +1562,9 @@ rpl_group_info::reinit(Relay_log_info *rli) tables_to_lock_count= 0; trans_retries= 0; last_event_start_time= 0; + gtid_sub_id= 0; + commit_id= 0; + gtid_pending= false; worker_error= 0; row_stmt_start_timestamp= 0; long_find_row_note_printed= false; @@ -1474,7 +1574,7 @@ rpl_group_info::reinit(Relay_log_info *rli) } rpl_group_info::rpl_group_info(Relay_log_info *rli) - : thd(0), gtid_sub_id(0), wait_commit_sub_id(0), + : thd(0), wait_commit_sub_id(0), wait_commit_group_info(0), parallel_entry(0), deferred_events(NULL), m_annotate_event(0), is_parallel_exec(false) { @@ -1505,9 +1605,11 @@ event_group_new_gtid(rpl_group_info *rgi, Gtid_log_event *gev) return 1; } rgi->gtid_sub_id= sub_id; - rgi->current_gtid.server_id= gev->server_id; rgi->current_gtid.domain_id= gev->domain_id; + rgi->current_gtid.server_id= gev->server_id; rgi->current_gtid.seq_no= gev->seq_no; + rgi->commit_id= gev->commit_id; + rgi->gtid_pending= 
true; return 0; } @@ -1563,7 +1665,7 @@ delete_or_keep_event_post_apply(rpl_group_info *rgi, void rpl_group_info::cleanup_context(THD *thd, bool error) { - DBUG_ENTER("Relay_log_info::cleanup_context"); + DBUG_ENTER("rpl_group_info::cleanup_context"); DBUG_PRINT("enter", ("error: %d", (int) error)); DBUG_ASSERT(this->thd == thd); @@ -1629,7 +1731,7 @@ void rpl_group_info::cleanup_context(THD *thd, bool error) void rpl_group_info::clear_tables_to_lock() { - DBUG_ENTER("Relay_log_info::clear_tables_to_lock()"); + DBUG_ENTER("rpl_group_info::clear_tables_to_lock()"); #ifndef DBUG_OFF /** When replicating in RBR and MyISAM Merge tables are involved @@ -1676,7 +1778,7 @@ void rpl_group_info::clear_tables_to_lock() void rpl_group_info::slave_close_thread_tables(THD *thd) { - DBUG_ENTER("Relay_log_info::slave_close_thread_tables(THD *thd)"); + DBUG_ENTER("rpl_group_info::slave_close_thread_tables(THD *thd)"); thd->get_stmt_da()->set_overwrite_status(true); thd->is_error() ? trans_rollback_stmt(thd) : trans_commit_stmt(thd); thd->get_stmt_da()->set_overwrite_status(false); @@ -1745,6 +1847,54 @@ rpl_group_info::mark_start_commit() } +/* + Format the current GTID as a string suitable for printing in error messages. + + The string is stored in a buffer inside rpl_group_info, so remains valid + until next call to gtid_info() or until destruction of rpl_group_info. + + If no GTID is available, then NULL is returned. +*/ +char * +rpl_group_info::gtid_info() +{ + if (!gtid_sub_id || !current_gtid.seq_no) + return NULL; + my_snprintf(gtid_info_buf, sizeof(gtid_info_buf), "Gtid %u-%u-%llu", + current_gtid.domain_id, current_gtid.server_id, + current_gtid.seq_no); + return gtid_info_buf; +} + + +/* + Undo the effect of a prior mark_start_commit(). + + This is only used for retrying a transaction in parallel replication, after + we have encountered a deadlock or other temporary error. 
+ + When we get such a deadlock, it means that the current group of transactions + did not yet all start committing (else they would not have deadlocked). So + we will not yet have woken up anything in the next group, our rgi->gco is + still live, and we can simply decrement the counter (to be incremented again + later, when the retry succeeds and reaches the commit step). +*/ +void +rpl_group_info::unmark_start_commit() +{ + rpl_parallel_entry *e; + + if (!did_mark_start_commit) + return; + + e= this->parallel_entry; + mysql_mutex_lock(&e->LOCK_parallel_entry); + --e->count_committing_event_groups; + mysql_mutex_unlock(&e->LOCK_parallel_entry); + did_mark_start_commit= false; +} + + rpl_sql_thread_info::rpl_sql_thread_info(Rpl_filter *filter) : rpl_filter(filter) { diff --git a/sql/rpl_rli.h b/sql/rpl_rli.h index 137571ab820..ce30813790c 100644 --- a/sql/rpl_rli.h +++ b/sql/rpl_rli.h @@ -28,6 +28,12 @@ struct RPL_TABLE_LIST; class Master_info; class Rpl_filter; + +enum { + LINES_IN_RELAY_LOG_INFO_WITH_DELAY= 5 +}; + + /**************************************************************************** Replication SQL Thread @@ -55,6 +61,7 @@ class Rpl_filter; *****************************************************************************/ struct rpl_group_info; +struct inuse_relaylog; class Relay_log_info : public Slave_reporting_capability { @@ -158,6 +165,15 @@ public: Master_info *mi; /* + List of active relay log files. + (This can be more than one in case of parallel replication). 
+ */ + inuse_relaylog *inuse_relaylog_list; + inuse_relaylog *last_inuse_relaylog; + /* Lock used to protect inuse_relaylog::dequeued_count */ + my_atomic_rwlock_t inuse_relaylog_atomic_lock; + + /* Needed to deal properly with cur_log getting closed and re-opened with a different log under our feet */ @@ -392,6 +408,7 @@ public: void stmt_done(my_off_t event_log_pos, time_t event_creation_time, THD *thd, rpl_group_info *rgi); + int alloc_inuse_relaylog(const char *name); /** Is the replication inside a group? @@ -458,6 +475,39 @@ private: /* + In parallel replication, if we need to re-try a transaction due to a + deadlock or other temporary error, we may need to go back and re-read events + out of an earlier relay log. + + This structure keeps track of the relaylogs that are potentially in use. + Each rpl_group_info has a pointer to one of those, corresponding to the + first GTID event. + + A pair of reference count keeps track of how long a relay log is potentially + in use. When the `completed' flag is set, all events have been read out of + the relay log, but the log might still be needed for retry in worker + threads. As worker threads complete an event group, they increment + atomically the `dequeued_count' with number of events queued. Thus, when + completed is set and dequeued_count equals queued_count, the relay log file + is finally done with and can be purged. + + By separating the queued and dequeued count, only the dequeued_count needs + multi-thread synchronisation; the completed flag and queued_count fields + are only accessed by the SQL driver thread and need no synchronisation. +*/ +struct inuse_relaylog { + inuse_relaylog *next; + /* Number of events in this relay log queued for worker threads. */ + int64 queued_count; + /* Number of events completed by worker threads. */ + volatile int64 dequeued_count; + /* Set when all events have been read from a relaylog. 
*/ + bool completed; + char name[FN_REFLEN]; +}; + + +/* This is data for various state needed to be kept for the processing of one event group (transaction) during replication. @@ -483,6 +533,7 @@ struct rpl_group_info */ uint64 gtid_sub_id; rpl_gtid current_gtid; + uint64 commit_id; /* This is used to keep transaction commit order. We will signal this when we commit, and can register it to wait for the @@ -560,6 +611,8 @@ struct rpl_group_info */ char future_event_master_log_name[FN_REFLEN]; bool is_parallel_exec; + /* When gtid_pending is true, we have not yet done record_gtid(). */ + bool gtid_pending; int worker_error; /* Set true when we signalled that we reach the commit phase. Used to avoid @@ -587,6 +640,17 @@ struct rpl_group_info */ time_t row_stmt_start_timestamp; bool long_find_row_note_printed; + /* Needs room for "Gtid D-S-N\x00". */ + char gtid_info_buf[5+10+1+10+1+20+1]; + + /* + Information to be able to re-try an event group in case of a deadlock or + other temporary error. + */ + inuse_relaylog *relay_log; + uint64 retry_start_offset; + uint64 retry_event_count; + bool killed_for_retry; rpl_group_info(Relay_log_info *rli_); ~rpl_group_info(); @@ -675,6 +739,8 @@ struct rpl_group_info void slave_close_thread_tables(THD *); void mark_start_commit_no_lock(); void mark_start_commit(); + char *gtid_info(); + void unmark_start_commit(); time_t get_row_stmt_start_timestamp() { diff --git a/sql/rpl_utility.cc b/sql/rpl_utility.cc index 05227a29775..25dff72090c 100644 --- a/sql/rpl_utility.cc +++ b/sql/rpl_utility.cc @@ -826,7 +826,7 @@ can_convert_field_to(Field *field, @retval false Master table is not compatible with slave table. */ bool -table_def::compatible_with(THD *thd, Relay_log_info *rli, +table_def::compatible_with(THD *thd, rpl_group_info *rgi, TABLE *table, TABLE **conv_table_var) const { @@ -834,6 +834,7 @@ table_def::compatible_with(THD *thd, Relay_log_info *rli, We only check the initial columns for the tables. 
*/ uint const cols_to_check= MY_MIN(table->s->fields, size()); + Relay_log_info *rli= rgi->rli; TABLE *tmp_table= NULL; for (uint col= 0 ; col < cols_to_check ; ++col) @@ -857,7 +858,7 @@ table_def::compatible_with(THD *thd, Relay_log_info *rli, This will create the full table with all fields. This is necessary to ge the correct field lengths for the record. */ - tmp_table= create_conversion_table(thd, rli, table); + tmp_table= create_conversion_table(thd, rgi, table); if (tmp_table == NULL) return false; /* @@ -885,7 +886,7 @@ table_def::compatible_with(THD *thd, Relay_log_info *rli, String target_type(target_buf, sizeof(target_buf), &my_charset_latin1); show_sql_type(type(col), field_metadata(col), &source_type, field->charset()); field->sql_type(target_type); - rli->report(ERROR_LEVEL, ER_SLAVE_CONVERSION_FAILED, + rli->report(ERROR_LEVEL, ER_SLAVE_CONVERSION_FAILED, rgi->gtid_info(), ER(ER_SLAVE_CONVERSION_FAILED), col, db_name, tbl_name, source_type.c_ptr_safe(), target_type.c_ptr_safe()); @@ -927,12 +928,14 @@ table_def::compatible_with(THD *thd, Relay_log_info *rli, conversion table. */ -TABLE *table_def::create_conversion_table(THD *thd, Relay_log_info *rli, TABLE *target_table) const +TABLE *table_def::create_conversion_table(THD *thd, rpl_group_info *rgi, + TABLE *target_table) const { DBUG_ENTER("table_def::create_conversion_table"); List<Create_field> field_list; TABLE *conv_table= NULL; + Relay_log_info *rli= rgi->rli; /* At slave, columns may differ. 
So we should create MY_MIN(columns@master, columns@slave) columns in the @@ -1014,7 +1017,7 @@ TABLE *table_def::create_conversion_table(THD *thd, Relay_log_info *rli, TABLE * err: if (conv_table == NULL) - rli->report(ERROR_LEVEL, ER_SLAVE_CANT_CREATE_CONVERSION, + rli->report(ERROR_LEVEL, ER_SLAVE_CANT_CREATE_CONVERSION, rgi->gtid_info(), ER(ER_SLAVE_CANT_CREATE_CONVERSION), target_table->s->db.str, target_table->s->table_name.str); diff --git a/sql/rpl_utility.h b/sql/rpl_utility.h index 7568a2d786c..ed0ce16363b 100644 --- a/sql/rpl_utility.h +++ b/sql/rpl_utility.h @@ -30,6 +30,7 @@ class Relay_log_info; class Log_event; +struct rpl_group_info; /** A table definition from the master. @@ -187,7 +188,7 @@ public: @retval 0 if the table definition is compatible with @c table */ #ifndef MYSQL_CLIENT - bool compatible_with(THD *thd, Relay_log_info *rli, TABLE *table, + bool compatible_with(THD *thd, rpl_group_info *rgi, TABLE *table, TABLE **conv_table_var) const; /** @@ -212,7 +213,8 @@ public: @return A pointer to a temporary table with memory allocated in the thread's memroot, NULL if the table could not be created */ - TABLE *create_conversion_table(THD *thd, Relay_log_info *rli, TABLE *target_table) const; + TABLE *create_conversion_table(THD *thd, rpl_group_info *rgi, + TABLE *target_table) const; #endif diff --git a/sql/scheduler.cc b/sql/scheduler.cc index ecf49e633ab..a9b253e478a 100644 --- a/sql/scheduler.cc +++ b/sql/scheduler.cc @@ -1,5 +1,5 @@ -/* Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved. - Copyright (c) 2012, 2013, Monty Program Ab +/* Copyright (c) 2007, 2013, Oracle and/or its affiliates. + Copyright (c) 2012, 2014, SkySQL Ab. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/sql/scheduler.h b/sql/scheduler.h index 06c17c7b114..f7aff377eac 100644 --- a/sql/scheduler.h +++ b/sql/scheduler.h @@ -99,15 +99,13 @@ public: void *data; /* scheduler-specific data structure */ }; -#undef HAVE_POOL_OF_THREADS -#if !defined(EMBEDDED_LIBRARY) && !defined(_AIX) -#define HAVE_POOL_OF_THREADS 1 +#ifdef HAVE_POOL_OF_THREADS void pool_of_threads_scheduler(scheduler_functions* func, ulong *arg_max_connections, uint *arg_connection_count); #else #define pool_of_threads_scheduler(A,B,C) \ one_thread_per_connection_scheduler(A, B, C) -#endif +#endif /*HAVE_POOL_OF_THREADS*/ #endif /* SCHEDULER_INCLUDED */ diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt index c36bdf3869f..76cf33c231f 100644 --- a/sql/share/errmsg-utf8.txt +++ b/sql/share/errmsg-utf8.txt @@ -3962,7 +3962,7 @@ ER_NEW_ABORTING_CONNECTION 08S01 spa "Abortada conexión %ld para db: '%-.192s' usuario: '%-.48s' servidor: '%-.64s' (%-.64s)" swe "Avbröt länken för tråd %ld till db '%-.192s', användare '%-.48s', host '%-.64s' (%-.64s)" ukr "Перервано з'єднання %ld до бази данних: '%-.192s' користувач: '%-.48s' хост: '%-.64s' (%-.64s)" -ER_unused_2 +ER_UNUSED_10 eng "You should never see it" ER_FLUSH_MASTER_BINLOG_CLOSED eng "Binlog closed, cannot RESET MASTER" @@ -5879,10 +5879,9 @@ ER_EVENT_NEITHER_M_EXPR_NOR_M_AT ger "Kein DATETIME-Ausdruck angegeben" ER_UNUSED_2 - eng "" - + eng "You should never see it" ER_UNUSED_3 - eng "" + eng "You should never see it" ER_EVENT_CANNOT_DELETE eng "Failed to delete the event from mysql.event" ger "Löschen des Events aus mysql.event fehlgeschlagen" @@ -5910,7 +5909,7 @@ ER_CANT_LOCK_LOG_TABLE eng "You can't use locks with log tables." ger "Log-Tabellen können nicht gesperrt werden." 
ER_UNUSED_4 - eng "" + eng "You should never see it" ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE eng "Column count of mysql.%s is wrong. Expected %d, found %d. Created with MariaDB %d, now running %d. Please use mysql_upgrade to fix this error." ger "Spaltenanzahl von mysql.%s falsch. %d erwartet, aber %d erhalten. Erzeugt mit MariaDB %d, jetzt unter %d. Bitte benutzen Sie mysql_upgrade, um den Fehler zu beheben" @@ -6090,8 +6089,8 @@ ER_TRG_CANT_OPEN_TABLE ER_CANT_CREATE_SROUTINE eng "Cannot create stored routine `%-.64s`. Check warnings" ger "Kann gespeicherte Routine `%-.64s` nicht erzeugen. Beachten Sie die Warnungen" -ER_UNUSED - eng "" +ER_UNUSED_11 + eng "You should never see it" ER_NO_FORMAT_DESCRIPTION_EVENT_BEFORE_BINLOG_STATEMENT eng "The BINLOG statement of type `%s` was not preceded by a format description BINLOG statement." ger "Der BINLOG-Anweisung vom Typ `%s` ging keine BINLOG-Anweisung zur Formatbeschreibung voran." @@ -6459,7 +6458,7 @@ ER_BINLOG_UNSAFE_INSERT_TWO_KEYS ER_TABLE_IN_FK_CHECK eng "Table is being used in foreign key check." -ER_unused_1 +ER_UNUSED_1 eng "You should never see it" ER_BINLOG_UNSAFE_AUTOINC_NOT_FIRST @@ -6529,7 +6528,7 @@ ER_ROW_DOES_NOT_MATCH_GIVEN_PARTITION_SET swe "Hittade en rad som inte passar i någon given partition" ER_UNUSED_5 - eng "" + eng "You should never see it" ER_CHANGE_RPL_INFO_REPOSITORY_FAILURE eng "Failure while changing the type of replication repository: %s." @@ -6995,11 +6994,11 @@ ER_UNKNOWN_OPTION ER_BAD_OPTION_VALUE eng "Incorrect value '%-.64s' for option '%-.64s'" ER_UNUSED_6 - eng "" + eng "You should never see it" ER_UNUSED_7 - eng "" + eng "You should never see it" ER_UNUSED_8 - eng "" + eng "You should never see it" ER_DATA_OVERFLOW 22003 eng "Got overflow when converting '%-.128s' to %-.32s. Value truncated." ER_DATA_TRUNCATED 22003 @@ -7024,8 +7023,8 @@ ER_VIEW_ORDERBY_IGNORED eng "View '%-.192s'.'%-.192s' ORDER BY clause ignored because there is other ORDER BY clause already." 
ER_CONNECTION_KILLED 70100 eng "Connection was killed" -ER_UNSED - eng "Internal error: '%-.192s'" +ER_UNUSED_12 + eng "You should never see it" ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION eng "Cannot modify @@session.skip_replication inside a transaction" ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION @@ -7110,3 +7109,5 @@ ER_IT_IS_A_VIEW 42S02 eng "'%-.192s' is a view" ER_SLAVE_SKIP_NOT_IN_GTID eng "When using GTID, @@sql_slave_skip_counter can not be used. Instead, setting @@gtid_slave_pos explicitly can be used to skip to after a given GTID position." +ER_TABLE_DEFINITION_TOO_BIG + eng "The definition for table %`s is too big" diff --git a/sql/slave.cc b/sql/slave.cc index 54a32f320c0..93bb8669632 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -304,7 +304,10 @@ handle_slave_init(void *arg __attribute__((unused))) mysql_mutex_lock(&LOCK_thread_count); thd->thread_id= thread_id++; mysql_mutex_unlock(&LOCK_thread_count); + thd->system_thread = SYSTEM_THREAD_SLAVE_INIT; thd->store_globals(); + thd->security_ctx->skip_grants(); + thd->set_command(COM_DAEMON); thd_proc_info(thd, "Loading slave GTID position from table"); if (rpl_load_gtid_slave_state(thd)) @@ -319,15 +322,22 @@ handle_slave_init(void *arg __attribute__((unused))) mysql_mutex_unlock(&LOCK_thread_count); my_thread_end(); - mysql_mutex_lock(&LOCK_thread_count); + mysql_mutex_lock(&LOCK_slave_init); slave_init_thread_running= false; - mysql_cond_broadcast(&COND_thread_count); - mysql_mutex_unlock(&LOCK_thread_count); + mysql_cond_broadcast(&COND_slave_init); + mysql_mutex_unlock(&LOCK_slave_init); return 0; } +/* + Start the slave init thread. + + This thread is used to load the GTID state from mysql.gtid_slave_pos at + server start; reading from table requires valid THD, which is otherwise not + available during server init. 
+*/ static int run_slave_init_thread() { @@ -341,10 +351,10 @@ run_slave_init_thread() return 1; } - mysql_mutex_lock(&LOCK_thread_count); + mysql_mutex_lock(&LOCK_slave_init); while (slave_init_thread_running) - mysql_cond_wait(&COND_thread_count, &LOCK_thread_count); - mysql_mutex_unlock(&LOCK_thread_count); + mysql_cond_wait(&COND_slave_init, &LOCK_slave_init); + mysql_mutex_unlock(&LOCK_slave_init); return 0; } @@ -1011,8 +1021,7 @@ static bool io_slave_killed(Master_info* mi) In the event of deffering decision @rli->last_event_start_time waiting timer is set to force the killed status be accepted upon its expiration. - @param thd pointer to a THD instance - @param rli pointer to Relay_log_info instance + @param rgi pointer to relay_group_info instance @return TRUE the killed status is recognized, FALSE a possible killed status is deferred. @@ -1092,21 +1101,21 @@ static bool sql_slave_killed(rpl_group_info *rgi) if (ret == 0) { - rli->report(WARNING_LEVEL, 0, + rli->report(WARNING_LEVEL, 0, rgi->gtid_info(), "Request to stop slave SQL Thread received while " "applying a group that has non-transactional " "changes; waiting for completion of the group ... 
"); } else { - rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, rgi->gtid_info(), ER(ER_SLAVE_FATAL_ERROR), msg_stopped); } } else { ret= TRUE; - rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, rgi->gtid_info(), ER(ER_SLAVE_FATAL_ERROR), msg_stopped); } @@ -1524,7 +1533,7 @@ static int get_master_version_and_clock(MYSQL* mysql, Master_info* mi) goto slave_killed_err; else if (is_network_error(mysql_errno(mysql))) { - mi->report(WARNING_LEVEL, mysql_errno(mysql), + mi->report(WARNING_LEVEL, mysql_errno(mysql), NULL, "Get master clock failed with error: %s", mysql_error(mysql)); goto network_err; } @@ -1589,7 +1598,7 @@ not always make sense; please check the manual before using it)."; goto slave_killed_err; else if (is_network_error(mysql_errno(mysql))) { - mi->report(WARNING_LEVEL, mysql_errno(mysql), + mi->report(WARNING_LEVEL, mysql_errno(mysql), NULL, "Get master SERVER_ID failed with error: %s", mysql_error(mysql)); goto network_err; } @@ -1602,7 +1611,7 @@ when it try to get the value of SERVER_ID variable from master."; } else if (!master_row && master_res) { - mi->report(WARNING_LEVEL, ER_UNKNOWN_SYSTEM_VARIABLE, + mi->report(WARNING_LEVEL, ER_UNKNOWN_SYSTEM_VARIABLE, NULL, "Unknown system variable 'SERVER_ID' on master, \ maybe it is a *VERY OLD MASTER*."); } @@ -1662,7 +1671,7 @@ be equal for the Statement-format replication to work"; goto slave_killed_err; else if (is_network_error(mysql_errno(mysql))) { - mi->report(WARNING_LEVEL, mysql_errno(mysql), + mi->report(WARNING_LEVEL, mysql_errno(mysql), NULL, "Get master COLLATION_SERVER failed with error: %s", mysql_error(mysql)); goto network_err; } @@ -1676,7 +1685,7 @@ when it try to get the value of COLLATION_SERVER global variable from master."; goto err; } else - mi->report(WARNING_LEVEL, ER_UNKNOWN_SYSTEM_VARIABLE, + mi->report(WARNING_LEVEL, ER_UNKNOWN_SYSTEM_VARIABLE, NULL, "Unknown system variable 
'COLLATION_SERVER' on master, \ maybe it is a *VERY OLD MASTER*. *NOTE*: slave may experience \ inconsistency if replicated data deals with collation."); @@ -1725,7 +1734,7 @@ be equal for the Statement-format replication to work"; goto slave_killed_err; else if (is_network_error(err_code= mysql_errno(mysql))) { - mi->report(ERROR_LEVEL, err_code, + mi->report(ERROR_LEVEL, err_code, NULL, "Get master TIME_ZONE failed with error: %s", mysql_error(mysql)); goto network_err; @@ -1733,7 +1742,7 @@ be equal for the Statement-format replication to work"; else if (err_code == ER_UNKNOWN_SYSTEM_VARIABLE) { /* We use ERROR_LEVEL to get the error logged to file */ - mi->report(ERROR_LEVEL, err_code, + mi->report(ERROR_LEVEL, err_code, NULL, "MySQL master doesn't have a TIME_ZONE variable. Note that" "if your timezone is not same between master and slave, your " @@ -1765,15 +1774,35 @@ when it try to get the value of TIME_ZONE global variable from master."; llstr((ulonglong) (mi->heartbeat_period*1000000000UL), llbuf); sprintf(query, query_format, llbuf); - if (mysql_real_query(mysql, query, strlen(query)) - && !check_io_slave_killed(mi, NULL)) + DBUG_EXECUTE_IF("simulate_slave_heartbeat_network_error", + { static ulong dbug_count= 0; + if (++dbug_count < 3) + goto heartbeat_network_error; + }); + if (mysql_real_query(mysql, query, strlen(query))) { - errmsg= "The slave I/O thread stops because SET @master_heartbeat_period " - "on master failed."; - err_code= ER_SLAVE_FATAL_ERROR; - sprintf(err_buff, "%s Error: %s", errmsg, mysql_error(mysql)); - mysql_free_result(mysql_store_result(mysql)); - goto err; + if (check_io_slave_killed(mi, NULL)) + goto slave_killed_err; + + if (is_network_error(mysql_errno(mysql))) + { + IF_DBUG(heartbeat_network_error: , ) + mi->report(WARNING_LEVEL, mysql_errno(mysql), NULL, + "SET @master_heartbeat_period to master failed with error: %s", + mysql_error(mysql)); + mysql_free_result(mysql_store_result(mysql)); + goto network_err; + } + else + { 
+ /* Fatal error */ + errmsg= "The slave I/O thread stops because a fatal error is encountered " + "when it tries to SET @master_heartbeat_period on master."; + err_code= ER_SLAVE_FATAL_ERROR; + sprintf(err_buff, "%s Error: %s", errmsg, mysql_error(mysql)); + mysql_free_result(mysql_store_result(mysql)); + goto err; + } } mysql_free_result(mysql_store_result(mysql)); } @@ -1810,7 +1839,7 @@ when it try to get the value of TIME_ZONE global variable from master."; if (global_system_variables.log_warnings > 1) { // this is tolerable as OM -> NS is supported - mi->report(WARNING_LEVEL, mysql_errno(mysql), + mi->report(WARNING_LEVEL, mysql_errno(mysql), NULL, "Notifying master by %s failed with " "error: %s", query, mysql_error(mysql)); } @@ -1819,7 +1848,7 @@ when it try to get the value of TIME_ZONE global variable from master."; { if (is_network_error(mysql_errno(mysql))) { - mi->report(WARNING_LEVEL, mysql_errno(mysql), + mi->report(WARNING_LEVEL, mysql_errno(mysql), NULL, "Notifying master by %s failed with " "error: %s", query, mysql_error(mysql)); mysql_free_result(mysql_store_result(mysql)); @@ -1855,7 +1884,7 @@ when it try to get the value of TIME_ZONE global variable from master."; goto slave_killed_err; else if (is_network_error(mysql_errno(mysql))) { - mi->report(WARNING_LEVEL, mysql_errno(mysql), + mi->report(WARNING_LEVEL, mysql_errno(mysql), NULL, "Get master BINLOG_CHECKSUM failed with error: %s", mysql_error(mysql)); goto network_err; } @@ -1892,7 +1921,7 @@ past_checksum: err_code= mysql_errno(mysql); if (is_network_error(err_code)) { - mi->report(ERROR_LEVEL, err_code, + mi->report(ERROR_LEVEL, err_code, NULL, "Setting master-side filtering of @@skip_replication failed " "with error: %s", mysql_error(mysql)); goto network_err; @@ -1936,7 +1965,7 @@ past_checksum: err_code= mysql_errno(mysql); if (is_network_error(err_code)) { - mi->report(ERROR_LEVEL, err_code, + mi->report(ERROR_LEVEL, err_code, NULL, "Setting @mariadb_slave_capability failed with 
error: %s", mysql_error(mysql)); goto network_err; @@ -2002,7 +2031,7 @@ after_set_capability: err_code= mysql_errno(mysql); if (is_network_error(err_code)) { - mi->report(ERROR_LEVEL, err_code, + mi->report(ERROR_LEVEL, err_code, NULL, "Setting @slave_connect_state failed with error: %s", mysql_error(mysql)); goto network_err; @@ -2035,7 +2064,7 @@ after_set_capability: err_code= mysql_errno(mysql); if (is_network_error(err_code)) { - mi->report(ERROR_LEVEL, err_code, + mi->report(ERROR_LEVEL, err_code, NULL, "Setting @slave_gtid_strict_mode failed with error: %s", mysql_error(mysql)); goto network_err; @@ -2068,7 +2097,7 @@ after_set_capability: err_code= mysql_errno(mysql); if (is_network_error(err_code)) { - mi->report(ERROR_LEVEL, err_code, + mi->report(ERROR_LEVEL, err_code, NULL, "Setting @slave_gtid_ignore_duplicates failed with " "error: %s", mysql_error(mysql)); goto network_err; @@ -2104,7 +2133,7 @@ after_set_capability: err_code= mysql_errno(mysql); if (is_network_error(err_code)) { - mi->report(ERROR_LEVEL, err_code, + mi->report(ERROR_LEVEL, err_code, NULL, "Setting @slave_until_gtid failed with error: %s", mysql_error(mysql)); goto network_err; @@ -2152,7 +2181,7 @@ after_set_capability: goto slave_killed_err; else if (is_network_error(mysql_errno(mysql))) { - mi->report(WARNING_LEVEL, mysql_errno(mysql), + mi->report(WARNING_LEVEL, mysql_errno(mysql), NULL, "Get master GTID position failed with error: %s", mysql_error(mysql)); goto network_err; } @@ -2182,7 +2211,7 @@ err: if (master_res) mysql_free_result(master_res); DBUG_ASSERT(err_code != 0); - mi->report(ERROR_LEVEL, err_code, "%s", err_buff); + mi->report(ERROR_LEVEL, err_code, NULL, "%s", err_buff); DBUG_RETURN(1); } @@ -2304,7 +2333,7 @@ static void write_ignored_events_info_to_relay_log(THD *thd, Master_info *mi) Rotate_log_event::DUP_NAME); rli->ign_master_log_name_end[0]= 0; if (unlikely(!(bool)rev)) - mi->report(ERROR_LEVEL, ER_SLAVE_CREATE_EVENT_FAILURE, + mi->report(ERROR_LEVEL, 
ER_SLAVE_CREATE_EVENT_FAILURE, NULL, ER(ER_SLAVE_CREATE_EVENT_FAILURE), "Rotate_event (out of memory?)," " SHOW SLAVE STATUS may be inaccurate"); @@ -2315,7 +2344,7 @@ static void write_ignored_events_info_to_relay_log(THD *thd, Master_info *mi) Gtid_list_log_event::FLAG_IGN_GTIDS); rli->ign_gtids.reset(); if (unlikely(!(bool)glev)) - mi->report(ERROR_LEVEL, ER_SLAVE_CREATE_EVENT_FAILURE, + mi->report(ERROR_LEVEL, ER_SLAVE_CREATE_EVENT_FAILURE, NULL, ER(ER_SLAVE_CREATE_EVENT_FAILURE), "Gtid_list_event (out of memory?)," " gtid_slave_pos may be inaccurate"); @@ -2328,7 +2357,7 @@ static void write_ignored_events_info_to_relay_log(THD *thd, Master_info *mi) DBUG_PRINT("info",("writing a Rotate event to track down ignored events")); rev->server_id= 0; // don't be ignored by slave SQL thread if (unlikely(rli->relay_log.append(rev))) - mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, + mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, NULL, ER(ER_SLAVE_RELAY_LOG_WRITE_FAILURE), "failed to write a Rotate event" " to the relay log, SHOW SLAVE STATUS may be" @@ -2341,7 +2370,7 @@ static void write_ignored_events_info_to_relay_log(THD *thd, Master_info *mi) glev->server_id= 0; // don't be ignored by slave SQL thread glev->set_artificial_event(); // Don't mess up Exec_Master_Log_Pos if (unlikely(rli->relay_log.append(glev))) - mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, + mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, NULL, ER(ER_SLAVE_RELAY_LOG_WRITE_FAILURE), "failed to write a Gtid_list event to the relay log, " "gtid_slave_pos may be inaccurate"); @@ -2426,7 +2455,7 @@ int register_slave_on_master(MYSQL* mysql, Master_info *mi, char buf[256]; my_snprintf(buf, sizeof(buf), "%s (Errno: %d)", mysql_error(mysql), mysql_errno(mysql)); - mi->report(ERROR_LEVEL, ER_SLAVE_MASTER_COM_FAILURE, + mi->report(ERROR_LEVEL, ER_SLAVE_MASTER_COM_FAILURE, NULL, ER(ER_SLAVE_MASTER_COM_FAILURE), "COM_REGISTER_SLAVE", buf); } DBUG_RETURN(1); @@ 
-3097,7 +3126,8 @@ static ulong read_event(MYSQL* mysql, Master_info *mi, bool* suppress_warnings) that the error is temporary by pushing a warning with the error code ER_GET_TEMPORARY_ERRMSG, if the originating error is temporary. */ -static int has_temporary_error(THD *thd) +int +has_temporary_error(THD *thd) { DBUG_ENTER("has_temporary_error"); @@ -3297,7 +3327,7 @@ int apply_event_and_update_pos(Log_event* ev, THD* thd, if (error) { char buf[22]; - rli->report(ERROR_LEVEL, ER_UNKNOWN_ERROR, + rli->report(ERROR_LEVEL, ER_UNKNOWN_ERROR, rgi->gtid_info(), "It was not possible to update the positions" " of the relay log information: the slave may" " be in an inconsistent state." @@ -3313,7 +3343,7 @@ int apply_event_and_update_pos(Log_event* ev, THD* thd, Make sure we do not errorneously update gtid_slave_pos with a lingering GTID from this failed event group (MDEV-4906). */ - rgi->gtid_sub_id= 0; + rgi->gtid_pending= false; } DBUG_RETURN(exec_res ? 1 : 0); @@ -3524,9 +3554,6 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli, if (opt_gtid_ignore_duplicates) { - serial_rgi->current_gtid.domain_id= gev->domain_id; - serial_rgi->current_gtid.server_id= gev->server_id; - serial_rgi->current_gtid.seq_no= gev->seq_no; int res= rpl_global_gtid_slave_state.check_duplicate_gtid (&serial_rgi->current_gtid, serial_rgi); if (res < 0) @@ -3639,7 +3666,7 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli, DBUG_RETURN(exec_res); } mysql_mutex_unlock(&rli->data_lock); - rli->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_READ_FAILURE, + rli->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_READ_FAILURE, NULL, ER(ER_SLAVE_RELAY_LOG_READ_FAILURE), "\ Could not parse relay log event entry. 
The possible reasons are: the master's \ binary log is corrupted (you can check this by running 'mysqlbinlog' on the \ @@ -3734,7 +3761,7 @@ static int try_to_reconnect(THD *thd, MYSQL *mysql, Master_info *mi, */ if (messages[SLAVE_RECON_MSG_COMMAND][0]) { - mi->report(WARNING_LEVEL, ER_SLAVE_MASTER_COM_FAILURE, + mi->report(WARNING_LEVEL, ER_SLAVE_MASTER_COM_FAILURE, NULL, ER(ER_SLAVE_MASTER_COM_FAILURE), messages[SLAVE_RECON_MSG_COMMAND], buf); } @@ -3824,7 +3851,7 @@ pthread_handler_t handle_slave_io(void *arg) /* Load the set of seen GTIDs, if we did not already. */ if (rpl_load_gtid_slave_state(thd)) { - mi->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), + mi->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), NULL, "Unable to load replication GTID slave state from mysql.%s: %s", rpl_gtid_slave_state_table_name.str, thd->get_stmt_da()->message()); @@ -3840,14 +3867,14 @@ pthread_handler_t handle_slave_io(void *arg) if (RUN_HOOK(binlog_relay_io, thread_start, (thd, mi))) { - mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, NULL, ER(ER_SLAVE_FATAL_ERROR), "Failed to run 'thread_start' hook"); goto err; } if (!(mi->mysql = mysql = mysql_init(NULL))) { - mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, NULL, ER(ER_SLAVE_FATAL_ERROR), "error in mysql_init()"); goto err; } @@ -4029,18 +4056,18 @@ Log entry on master is longer than slave_max_allowed_packet (%lu) on \ slave. 
If the entry is correct, restart the server with a higher value of \ slave_max_allowed_packet", slave_max_allowed_packet); - mi->report(ERROR_LEVEL, ER_NET_PACKET_TOO_LARGE, + mi->report(ERROR_LEVEL, ER_NET_PACKET_TOO_LARGE, NULL, "%s", "Got a packet bigger than 'slave_max_allowed_packet' bytes"); goto err; case ER_MASTER_FATAL_ERROR_READING_BINLOG: - mi->report(ERROR_LEVEL, ER_MASTER_FATAL_ERROR_READING_BINLOG, + mi->report(ERROR_LEVEL, ER_MASTER_FATAL_ERROR_READING_BINLOG, NULL, ER(ER_MASTER_FATAL_ERROR_READING_BINLOG), mysql_error_number, mysql_error(mysql)); goto err; case ER_OUT_OF_RESOURCES: sql_print_error("\ Stopping slave I/O thread due to out-of-memory error from master"); - mi->report(ERROR_LEVEL, ER_OUT_OF_RESOURCES, + mi->report(ERROR_LEVEL, ER_OUT_OF_RESOURCES, NULL, "%s", ER(ER_OUT_OF_RESOURCES)); goto err; } @@ -4057,7 +4084,7 @@ Stopping slave I/O thread due to out-of-memory error from master"); (thd, mi,(const char*)mysql->net.read_pos + 1, event_len, &event_buf, &event_len))) { - mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, NULL, ER(ER_SLAVE_FATAL_ERROR), "Failed to run 'after_read_event' hook"); goto err; @@ -4068,7 +4095,7 @@ Stopping slave I/O thread due to out-of-memory error from master"); bool synced= 0; if (queue_event(mi, event_buf, event_len)) { - mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, + mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, NULL, ER(ER_SLAVE_RELAY_LOG_WRITE_FAILURE), "could not queue event from master"); goto err; @@ -4077,7 +4104,7 @@ Stopping slave I/O thread due to out-of-memory error from master"); if (RUN_HOOK(binlog_relay_io, after_queue_event, (thd, mi, event_buf, event_len, synced))) { - mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, NULL, ER(ER_SLAVE_FATAL_ERROR), "Failed to run 'after_queue_event' hook"); goto err; @@ -4174,9 +4201,10 @@ err_during_init: // TODO: make rpl_status part of 
Master_info change_rpl_status(RPL_ACTIVE_SLAVE,RPL_IDLE_SLAVE); mysql_mutex_lock(&LOCK_thread_count); + thd->unlink(); + mysql_mutex_unlock(&LOCK_thread_count); THD_CHECK_SENTRY(thd); delete thd; - mysql_mutex_unlock(&LOCK_thread_count); mi->abort_slave= 0; mi->slave_running= MYSQL_SLAVE_NOT_RUN; mi->io_thd= 0; @@ -4265,13 +4293,14 @@ end: void -slave_output_error_info(Relay_log_info *rli, THD *thd) +slave_output_error_info(rpl_group_info *rgi, THD *thd) { /* retrieve as much info as possible from the thd and, error codes and warnings and print this to the error log as to allow the user to locate the error */ + Relay_log_info *rli= rgi->rli; uint32 const last_errno= rli->last_error().number; char llbuff[22]; @@ -4288,7 +4317,8 @@ slave_output_error_info(Relay_log_info *rli, THD *thd) This function is reporting an error which was not reported while executing exec_relay_log_event(). */ - rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), "%s", errmsg); + rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), + rgi->gtid_info(), "%s", errmsg); } else if (last_errno != thd->get_stmt_da()->sql_errno()) { @@ -4367,6 +4397,7 @@ pthread_handler_t handle_slave_sql(void *arg) char saved_master_log_name[FN_REFLEN]; my_off_t UNINIT_VAR(saved_log_pos); my_off_t UNINIT_VAR(saved_master_log_pos); + String saved_skip_gtid_pos; my_off_t saved_skip= 0; Master_info *mi= ((Master_info*)arg); Relay_log_info* rli = &mi->rli; @@ -4423,7 +4454,7 @@ pthread_handler_t handle_slave_sql(void *arg) will be stuck if we fail here */ mysql_cond_broadcast(&rli->start_cond); - rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, NULL, "Failed during slave thread initialization"); goto err_during_init; } @@ -4475,16 +4506,20 @@ pthread_handler_t handle_slave_sql(void *arg) mysql_mutex_unlock(&rli->log_space_lock); serial_rgi->gtid_sub_id= 0; + serial_rgi->gtid_pending= false; if (init_relay_log_pos(rli, rli->group_relay_log_name, 
rli->group_relay_log_pos, 1 /*need data lock*/, &errmsg, 1 /*look for a description_event*/)) { - rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, NULL, "Error initializing relay log position: %s", errmsg); goto err; } + if (rli->alloc_inuse_relaylog(rli->group_relay_log_name)) + goto err; + strcpy(rli->future_event_master_log_name, rli->group_master_log_name); THD_CHECK_SENTRY(thd); #ifndef DBUG_OFF @@ -4538,7 +4573,7 @@ log '%s' at position %s, relay log '%s' position: %s%s", RPL_LOG_NAME, if (check_temp_dir(rli->slave_patternload_file)) { - rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), + rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), NULL, "Unable to use slave's temporary directory %s - %s", slave_load_tmpdir, thd->get_stmt_da()->message()); goto err; @@ -4547,7 +4582,7 @@ log '%s' at position %s, relay log '%s' position: %s%s", RPL_LOG_NAME, /* Load the set of seen GTIDs, if we did not already. */ if (rpl_load_gtid_slave_state(thd)) { - rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), + rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), NULL, "Unable to load replication GTID slave state from mysql.%s: %s", rpl_gtid_slave_state_table_name.str, thd->get_stmt_da()->message()); @@ -4566,7 +4601,7 @@ log '%s' at position %s, relay log '%s' position: %s%s", RPL_LOG_NAME, execute_init_command(thd, &opt_init_slave, &LOCK_sys_init_slave); if (thd->is_slave_error) { - rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), + rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), NULL, "Slave SQL thread aborted. 
Can't execute init_slave query"); goto err; } @@ -4583,6 +4618,12 @@ log '%s' at position %s, relay log '%s' position: %s%s", RPL_LOG_NAME, strmake_buf(saved_master_log_name, rli->group_master_log_name); saved_log_pos= rli->group_relay_log_pos; saved_master_log_pos= rli->group_master_log_pos; + if (mi->using_gtid != Master_info::USE_GTID_NO) + { + saved_skip_gtid_pos.append(STRING_WITH_LEN(", GTID '")); + rpl_append_gtid_state(&saved_skip_gtid_pos, false); + saved_skip_gtid_pos.append(STRING_WITH_LEN("'; ")); + } saved_skip= rli->slave_skip_counter; } if ((rli->until_condition == Relay_log_info::UNTIL_MASTER_POS || @@ -4606,16 +4647,27 @@ log '%s' at position %s, relay log '%s' position: %s%s", RPL_LOG_NAME, if (saved_skip && rli->slave_skip_counter == 0) { + String tmp; + if (mi->using_gtid != Master_info::USE_GTID_NO) + { + tmp.append(STRING_WITH_LEN(", GTID '")); + rpl_append_gtid_state(&tmp, false); + tmp.append(STRING_WITH_LEN("'; ")); + } + sql_print_information("'SQL_SLAVE_SKIP_COUNTER=%ld' executed at " "relay_log_file='%s', relay_log_pos='%ld', master_log_name='%s', " - "master_log_pos='%ld' and new position at " + "master_log_pos='%ld'%s and new position at " "relay_log_file='%s', relay_log_pos='%ld', master_log_name='%s', " - "master_log_pos='%ld' ", + "master_log_pos='%ld'%s ", (ulong) saved_skip, saved_log_name, (ulong) saved_log_pos, saved_master_log_name, (ulong) saved_master_log_pos, + saved_skip_gtid_pos.c_ptr_safe(), rli->group_relay_log_name, (ulong) rli->group_relay_log_pos, - rli->group_master_log_name, (ulong) rli->group_master_log_pos); + rli->group_master_log_name, (ulong) rli->group_master_log_pos, + tmp.c_ptr_safe()); saved_skip= 0; + saved_skip_gtid_pos.free(); } if (exec_relay_log_event(thd, rli, serial_rgi)) @@ -4624,7 +4676,7 @@ log '%s' at position %s, relay log '%s' position: %s%s", RPL_LOG_NAME, // do not scare the user if SQL thread was simply killed or stopped if (!sql_slave_killed(serial_rgi)) { - slave_output_error_info(rli, 
thd); + slave_output_error_info(serial_rgi, thd); #ifdef WITH_WSREP uint32 const last_errno= rli->last_error().number; if (WSREP_ON && last_errno == ER_UNKNOWN_COM_ERROR) @@ -4822,7 +4874,7 @@ static int process_io_create_file(Master_info* mi, Create_file_log_event* cev) xev.log_pos = cev->log_pos; if (unlikely(mi->rli.relay_log.append(&xev))) { - mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, + mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, NULL, ER(ER_SLAVE_RELAY_LOG_WRITE_FAILURE), "error writing Exec_load event to relay log"); goto err; @@ -4836,7 +4888,7 @@ static int process_io_create_file(Master_info* mi, Create_file_log_event* cev) cev->block_len = num_bytes; if (unlikely(mi->rli.relay_log.append(cev))) { - mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, + mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, NULL, ER(ER_SLAVE_RELAY_LOG_WRITE_FAILURE), "error writing Create_file event to relay log"); goto err; @@ -4851,7 +4903,7 @@ static int process_io_create_file(Master_info* mi, Create_file_log_event* cev) aev.log_pos = cev->log_pos; if (unlikely(mi->rli.relay_log.append(&aev))) { - mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, + mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, NULL, ER(ER_SLAVE_RELAY_LOG_WRITE_FAILURE), "error writing Append_block event to relay log"); goto err; @@ -4958,7 +5010,7 @@ static int queue_binlog_ver_1_event(Master_info *mi, const char *buf, { if (unlikely(!(tmp_buf=(char*)my_malloc(event_len+1,MYF(MY_WME))))) { - mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, NULL, ER(ER_SLAVE_FATAL_ERROR), "Memory allocation failed"); DBUG_RETURN(1); } @@ -5741,7 +5793,7 @@ err: mysql_mutex_unlock(&mi->data_lock); DBUG_PRINT("info", ("error: %d", error)); if (error) - mi->report(ERROR_LEVEL, error, ER(error), + mi->report(ERROR_LEVEL, error, NULL, ER(error), (error == ER_SLAVE_RELAY_LOG_WRITE_FAILURE)? 
"could not queue event from master" : error_msg.ptr()); @@ -5848,7 +5900,7 @@ static int connect_to_master(THD* thd, MYSQL* mysql, Master_info* mi, #ifndef DBUG_OFF mi->events_till_disconnect = disconnect_slave_event_count; #endif - ulong client_flag= CLIENT_REMEMBER_OPTIONS; + ulong client_flag= 0; if (opt_slave_compressed_protocol) client_flag=CLIENT_COMPRESS; /* We will use compression */ @@ -5886,7 +5938,7 @@ static int connect_to_master(THD* thd, MYSQL* mysql, Master_info* mi, /* we disallow empty users */ if (mi->user == NULL || mi->user[0] == 0) { - mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, NULL, ER(ER_SLAVE_FATAL_ERROR), "Invalid (empty) username when attempting to " "connect to the master server. Connection attempt " @@ -5903,7 +5955,7 @@ static int connect_to_master(THD* thd, MYSQL* mysql, Master_info* mi, { last_errno=mysql_errno(mysql); suppress_warnings= 0; - mi->report(ERROR_LEVEL, last_errno, + mi->report(ERROR_LEVEL, last_errno, NULL, "error %s to master '%s@%s:%d'" " - retry-time: %d retries: %lu message: %s", (reconnect ? 
"reconnecting" : "connecting"), @@ -6435,6 +6487,7 @@ static Log_event* next_event(rpl_group_info *rgi, ulonglong *event_size) DBUG_ASSERT(rli->cur_log_fd >= 0); mysql_file_close(rli->cur_log_fd, MYF(MY_WME)); rli->cur_log_fd = -1; + rli->last_inuse_relaylog->completed= true; if (relay_log_purge) { @@ -6563,6 +6616,12 @@ static Log_event* next_event(rpl_group_info *rgi, ulonglong *event_size) mysql_mutex_unlock(log_lock); goto err; } + if (rli->alloc_inuse_relaylog(rli->linfo.log_file_name)) + { + if (!hot_log) + mysql_mutex_unlock(log_lock); + goto err; + } if (!hot_log) mysql_mutex_unlock(log_lock); continue; @@ -6578,6 +6637,8 @@ static Log_event* next_event(rpl_group_info *rgi, ulonglong *event_size) if ((rli->cur_log_fd=open_binlog(cur_log,rli->linfo.log_file_name, &errmsg)) <0) goto err; + if (rli->alloc_inuse_relaylog(rli->linfo.log_file_name)) + goto err; } else { @@ -6716,7 +6777,7 @@ bool rpl_master_has_bug(const Relay_log_info *rli, uint bug_id, bool report, " so slave stops; check error log on slave" " for more info", MYF(0), bug_id); // a verbose message for the error log - rli->report(ERROR_LEVEL, ER_UNKNOWN_ERROR, + rli->report(ERROR_LEVEL, ER_UNKNOWN_ERROR, NULL, "According to the master's version ('%s')," " it is probable that master suffers from this bug:" " http://bugs.mysql.com/bug.php?id=%u" diff --git a/sql/slave.h b/sql/slave.h index aa3976f6e6c..4b5bc1686fb 100644 --- a/sql/slave.h +++ b/sql/slave.h @@ -229,12 +229,13 @@ int purge_relay_logs(Relay_log_info* rli, THD *thd, bool just_reset, void set_slave_thread_options(THD* thd); void set_slave_thread_default_charset(THD *thd, rpl_group_info *rgi); int rotate_relay_log(Master_info* mi); +int has_temporary_error(THD *thd); int apply_event_and_update_pos(Log_event* ev, THD* thd, struct rpl_group_info *rgi, rpl_parallel_thread *rpt); pthread_handler_t handle_slave_io(void *arg); -void slave_output_error_info(Relay_log_info *rli, THD *thd); +void slave_output_error_info(rpl_group_info *rgi, THD 
*thd); pthread_handler_t handle_slave_sql(void *arg); bool net_request_file(NET* net, const char* fname); diff --git a/sql/sp_head.cc b/sql/sp_head.cc index 8a9e8ddc816..366679a2fb0 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -43,6 +43,7 @@ #include "sql_base.h" // close_thread_tables #include "transaction.h" // trans_commit_stmt #include "sql_audit.h" +#include "debug_sync.h" /* Sufficient max length of printed destinations and frame offsets (all uints). @@ -1309,6 +1310,7 @@ sp_head::execute(THD *thd, bool merge_da_on_success) /* Discard the initial part of executing routines. */ thd->profiling.discard_current_query(); #endif + DEBUG_SYNC(thd, "sp_head_execute_before_loop"); do { sp_instr *i; @@ -1885,9 +1887,7 @@ sp_head::execute_function(THD *thd, Item **argp, uint argcount, as one select and not resetting THD::user_var_events before each invocation. */ - mysql_mutex_lock(&LOCK_thread_count); - q= global_query_id; - mysql_mutex_unlock(&LOCK_thread_count); + q= get_query_id(); mysql_bin_log.start_union_events(thd, q + 1); binlog_save_options= thd->variables.option_bits; thd->variables.option_bits&= ~OPTION_BIN_LOG; @@ -2323,6 +2323,11 @@ sp_head::restore_lex(THD *thd) */ if (sp_update_sp_used_routines(&m_sroutines, &sublex->sroutines)) DBUG_RETURN(TRUE); + + /* If this substatement is a update query, then mark MODIFIES_DATA */ + if (is_update_query(sublex->sql_command)) + m_flags|= MODIFIES_DATA; + /* Merge tables used by this statement (but not by its functions or procedures) to multiset of tables used by this routine. diff --git a/sql/sp_head.h b/sql/sp_head.h index cc598186d08..dbdb957aa79 100644 --- a/sql/sp_head.h +++ b/sql/sp_head.h @@ -161,7 +161,21 @@ public: LOG_SLOW_STATEMENTS= 256, // Used by events LOG_GENERAL_LOG= 512, // Used by events HAS_SQLCOM_RESET= 1024, - HAS_SQLCOM_FLUSH= 2048 + HAS_SQLCOM_FLUSH= 2048, + + /** + Marks routines that directly (i.e. not by calling other routines) + change tables. 
Note that this flag is set automatically based on + type of statements used in the stored routine and is different + from routine characteristic provided by user in a form of CONTAINS + SQL, READS SQL DATA, MODIFIES SQL DATA clauses. The latter are + accepted by parser but pretty much ignored after that. + We don't rely on them: + a) for compatibility reasons. + b) because in CONTAINS SQL case they don't provide enough + information anyway. + */ + MODIFIES_DATA= 4096 }; stored_procedure_type m_type; @@ -332,11 +346,17 @@ public: int add_instr(sp_instr *instr); - inline uint - instructions() - { - return m_instr.elements; - } + /** + Returns true if any substatement in the routine directly + (not through another routine) modifies data/changes table. + + @sa Comment for MODIFIES_DATA flag. + */ + bool modifies_data() const + { return m_flags & MODIFIES_DATA; } + + inline uint instructions() + { return m_instr.elements; } inline sp_instr * last_instruction() diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 43aa68773bc..c980c435c79 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -300,7 +300,7 @@ public: bool eq(const char *user2, const char *host2) { return !cmp(user2, host2); } - bool wild_eq(const char *user2, const char *host2, const char *ip2 = 0) + bool wild_eq(const char *user2, const char *host2, const char *ip2) { if (strcmp(safe_str(user.str), safe_str(user2))) return false; @@ -1883,7 +1883,7 @@ int acl_check_setrole(THD *thd, char *rolename, ulonglong *access) acl_user= (ACL_USER *)acl_user_base; /* Yes! priv_user@host. Don't ask why - that's what check_access() does. 
*/ if (acl_user->wild_eq(thd->security_ctx->priv_user, - thd->security_ctx->host)) + thd->security_ctx->host, thd->security_ctx->ip)) { is_granted= TRUE; break; @@ -12479,7 +12479,7 @@ maria_declare_plugin(mysql_password) NULL, /* status variables */ NULL, /* system variables */ "1.0", /* String version */ - MariaDB_PLUGIN_MATURITY_BETA /* Maturity */ + MariaDB_PLUGIN_MATURITY_STABLE /* Maturity */ }, { MYSQL_AUTHENTICATION_PLUGIN, /* type constant */ @@ -12494,7 +12494,7 @@ maria_declare_plugin(mysql_password) NULL, /* status variables */ NULL, /* system variables */ "1.0", /* String version */ - MariaDB_PLUGIN_MATURITY_BETA /* Maturity */ + MariaDB_PLUGIN_MATURITY_STABLE /* Maturity */ } maria_declare_plugin_end; diff --git a/sql/sql_admin.cc b/sql/sql_admin.cc index ee70914d331..f24bb2cff93 100644 --- a/sql/sql_admin.cc +++ b/sql/sql_admin.cc @@ -914,7 +914,7 @@ send_result_message: protocol->store(operator_name, system_charset_info); if (result_code) // either mysql_recreate_table or analyze failed { - DBUG_ASSERT(thd->is_error() || thd->killed); + DBUG_ASSERT(thd->is_error()); if (thd->is_error()) { const char *err_msg= thd->get_stmt_da()->message(); diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 431a334436e..e253f467b5c 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -2088,7 +2088,10 @@ bool open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, DBUG_RETURN(TRUE); if (!(flags & MYSQL_OPEN_IGNORE_KILLED) && thd->killed) + { + thd->send_kill_message(); DBUG_RETURN(TRUE); + } /* Check if we're trying to take a write lock in a read only transaction. @@ -3518,9 +3521,12 @@ Open_table_context::recover_from_failed_open() /* Return a appropriate read lock type given a table object. - @param thd Thread context - @param prelocking_ctx Prelocking context. - @param table_list Table list element for table to be locked. + @param thd Thread context + @param prelocking_ctx Prelocking context. 
+ @param table_list Table list element for table to be locked. + @param routine_modifies_data + Some routine that is invoked by statement + modifies data. @remark Due to a statement-based replication limitation, statements such as INSERT INTO .. SELECT FROM .. and CREATE TABLE .. SELECT FROM need @@ -3533,9 +3539,13 @@ Open_table_context::recover_from_failed_open() This also applies to SELECT/SET/DO statements which use stored functions. Calls to such functions are going to be logged as a whole and thus should be serialized against concurrent changes - to tables used by those functions. This can be avoided if functions - only read data but doing so requires more complex analysis than it - is done now. + to tables used by those functions. This is avoided when functions + do not modify data but only read it, since in this case nothing is + written to the binary log. Argument routine_modifies_data + denotes the same. So effectively, if the statement is not a + update query and routine_modifies_data is false, then + prelocking_placeholder does not take importance. 
+ Furthermore, this does not apply to I_S and log tables as it's always unsafe to replicate such tables under statement-based replication as the table on the slave might contain other data @@ -3550,7 +3560,8 @@ Open_table_context::recover_from_failed_open() thr_lock_type read_lock_type_for_table(THD *thd, Query_tables_list *prelocking_ctx, - TABLE_LIST *table_list) + TABLE_LIST *table_list, + bool routine_modifies_data) { /* In cases when this function is called for a sub-statement executed in @@ -3565,7 +3576,7 @@ thr_lock_type read_lock_type_for_table(THD *thd, (table_list->table->s->table_category == TABLE_CATEGORY_LOG) || (table_list->table->s->table_category == TABLE_CATEGORY_PERFORMANCE) || !(is_update_query(prelocking_ctx->sql_command) || - table_list->prelocking_placeholder || + (routine_modifies_data && table_list->prelocking_placeholder) || (thd->locked_tables_mode > LTM_LOCK_TABLES))) return TL_READ; else @@ -3578,19 +3589,21 @@ thr_lock_type read_lock_type_for_table(THD *thd, and, if prelocking strategy prescribes so, extend the prelocking set with tables and routines used by it. - @param[in] thd Thread context. - @param[in] prelocking_ctx Prelocking context. - @param[in] rt Element of prelocking set to be processed. - @param[in] prelocking_strategy Strategy which specifies how the - prelocking set should be extended when - one of its elements is processed. - @param[in] has_prelocking_list Indicates that prelocking set/list for - this statement has already been built. - @param[in] ot_ctx Context of open_table used to recover from - locking failures. - @param[out] need_prelocking Set to TRUE if it was detected that this - statement will require prelocked mode for - its execution, not touched otherwise. + @param[in] thd Thread context. + @param[in] prelocking_ctx Prelocking context. + @param[in] rt Element of prelocking set to be processed. 
+ @param[in] prelocking_strategy Strategy which specifies how the + prelocking set should be extended when + one of its elements is processed. + @param[in] has_prelocking_list Indicates that prelocking set/list for + this statement has already been built. + @param[in] ot_ctx Context of open_table used to recover from + locking failures. + @param[out] need_prelocking Set to TRUE if it was detected that this + statement will require prelocked mode for + its execution, not touched otherwise. + @param[out] routine_modifies_data Set to TRUE if it was detected that this + routine does modify table data. @retval FALSE Success. @retval TRUE Failure (Conflicting metadata lock, OOM, other errors). @@ -3602,11 +3615,13 @@ open_and_process_routine(THD *thd, Query_tables_list *prelocking_ctx, Prelocking_strategy *prelocking_strategy, bool has_prelocking_list, Open_table_context *ot_ctx, - bool *need_prelocking) + bool *need_prelocking, bool *routine_modifies_data) { MDL_key::enum_mdl_namespace mdl_type= rt->mdl_request.key.mdl_namespace(); DBUG_ENTER("open_and_process_routine"); + *routine_modifies_data= false; + switch (mdl_type) { case MDL_key::FUNCTION: @@ -3659,10 +3674,13 @@ open_and_process_routine(THD *thd, Query_tables_list *prelocking_ctx, DBUG_RETURN(TRUE); /* 'sp' is NULL when there is no such routine. */ - if (sp && !has_prelocking_list) + if (sp) { - prelocking_strategy->handle_routine(thd, prelocking_ctx, rt, sp, - need_prelocking); + *routine_modifies_data= sp->modifies_data(); + + if (!has_prelocking_list) + prelocking_strategy->handle_routine(thd, prelocking_ctx, rt, sp, + need_prelocking); } } else @@ -4007,16 +4025,7 @@ open_and_process_table(THD *thd, LEX *lex, TABLE_LIST *tables, goto end; } - if (tables->lock_type != TL_UNLOCK && ! 
thd->locked_tables_mode) - { - if (tables->lock_type == TL_WRITE_DEFAULT) - tables->table->reginfo.lock_type= thd->update_lock_default; - else if (tables->lock_type == TL_READ_DEFAULT) - tables->table->reginfo.lock_type= - read_lock_type_for_table(thd, lex, tables); - else - tables->table->reginfo.lock_type= tables->lock_type; - } + /* Copy grant information from TABLE_LIST instance to TABLE one. */ tables->table->grant= tables->grant; /* Check and update metadata version of a base table. */ @@ -4355,6 +4364,7 @@ bool open_tables(THD *thd, TABLE_LIST **start, uint *counter, uint flags, Open_table_context ot_ctx(thd, flags); bool error= FALSE; MEM_ROOT new_frm_mem; + bool some_routine_modifies_data= FALSE; bool has_prelocking_list; DBUG_ENTER("open_tables"); @@ -4527,11 +4537,16 @@ restart: sroutine_to_open= &rt->next, rt= rt->next) { bool need_prelocking= false; + bool routine_modifies_data; TABLE_LIST **save_query_tables_last= thd->lex->query_tables_last; error= open_and_process_routine(thd, thd->lex, rt, prelocking_strategy, has_prelocking_list, &ot_ctx, - &need_prelocking); + &need_prelocking, + &routine_modifies_data); + + // Remember if any of SF modifies data. + some_routine_modifies_data|= routine_modifies_data; if (need_prelocking && ! thd->lex->requires_prelocking()) thd->lex->mark_as_requiring_prelocking(save_query_tables_last); @@ -4571,6 +4586,10 @@ restart: children, attach the children to their parents. At end of statement, the children are detached. Attaching and detaching are always done, even under LOCK TABLES. + + We also convert all TL_WRITE_DEFAULT and TL_READ_DEFAULT locks to + appropriate "real" lock types to be used for locking and to be passed + to storage engine. */ for (tables= *start; tables; tables= tables->next_global) { @@ -4587,6 +4606,19 @@ restart: goto err; } } + + /* Set appropriate TABLE::lock_type. 
*/ + if (tbl && tables->lock_type != TL_UNLOCK && !thd->locked_tables_mode) + { + if (tables->lock_type == TL_WRITE_DEFAULT) + tbl->reginfo.lock_type= thd->update_lock_default; + else if (tables->lock_type == TL_READ_DEFAULT) + tbl->reginfo.lock_type= + read_lock_type_for_table(thd, thd->lex, tables, + some_routine_modifies_data); + else + tbl->reginfo.lock_type= tables->lock_type; + } } #ifdef WITH_WSREP if ((thd->lex->sql_command== SQLCOM_INSERT || @@ -4875,11 +4907,15 @@ static bool check_lock_and_start_stmt(THD *thd, engine is important as, for example, InnoDB uses it to determine what kind of row locks should be acquired when executing statement in prelocked mode or under LOCK TABLES with @@innodb_table_locks = 0. + + Last argument routine_modifies_data for read_lock_type_for_table() + is ignored, as prelocking placeholder will never be set here. */ + DBUG_ASSERT(table_list->prelocking_placeholder == false); if (table_list->lock_type == TL_WRITE_DEFAULT) lock_type= thd->update_lock_default; else if (table_list->lock_type == TL_READ_DEFAULT) - lock_type= read_lock_type_for_table(thd, prelocking_ctx, table_list); + lock_type= read_lock_type_for_table(thd, prelocking_ctx, table_list, true); else lock_type= table_list->lock_type; diff --git a/sql/sql_base.h b/sql/sql_base.h index 61442843a39..8a0a1e42500 100644 --- a/sql/sql_base.h +++ b/sql/sql_base.h @@ -136,7 +136,8 @@ TABLE *find_write_locked_table(TABLE *list, const char *db, const char *table_name); thr_lock_type read_lock_type_for_table(THD *thd, Query_tables_list *prelocking_ctx, - TABLE_LIST *table_list); + TABLE_LIST *table_list, + bool routine_modifies_data); my_bool mysql_rm_tmp_tables(void); bool rm_temporary_table(handlerton *base, const char *path); diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index cf68ba36997..6001517b0c7 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -1711,7 +1711,7 @@ Query_cache::send_result_to_client(THD *thd, char *org_sql, uint query_length) 
DBUG_ENTER("Query_cache::send_result_to_client"); /* - Testing 'query_cache_size' without a lock here is safe: the thing + Testing without a lock here is safe: the thing we may loose is that the query won't be served from cache, but we save on mutex locking in the case when query cache is disabled. @@ -1731,8 +1731,6 @@ Query_cache::send_result_to_client(THD *thd, char *org_sql, uint query_length) goto err; } - DBUG_ASSERT(query_cache_size != 0); // otherwise cache would be disabled - thd->query_cache_is_applicable= 1; sql= org_sql; sql_end= sql + query_length; diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 198a67ac712..d12477e81f2 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -1,6 +1,6 @@ /* Copyright (c) 2000, 2013, Oracle and/or its affiliates. - Copyright (c) 2008, 2013, Monty Program Ab. + Copyright (c) 2008, 2014, SkySQL Ab. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -1054,7 +1054,6 @@ THD::THD() accessed_rows_and_keys(0), m_statement_psi(NULL), m_idle_psi(NULL), - m_server_idle(false), thread_id(0), global_disable_checkpoint(0), failed_com_change_user(0), @@ -4482,6 +4481,219 @@ extern "C" int thd_rpl_is_parallel(const MYSQL_THD thd) return thd->rgi_slave && thd->rgi_slave->is_parallel_exec; } +/* + This function can optionally be called to check if thd_report_wait_for() + needs to be called for waits done by a given transaction. + + If this function returns false for a given thd, there is no need to do any + calls to thd_report_wait_for() on that thd. + + This call is optional; it is safe to call thd_report_wait_for() in any case. + This call can be used to save some redundant calls to thd_report_wait_for() + if desired. (This is unlikely to matter much unless there are _lots_ of + waits to report, as the overhead of thd_report_wait_for() is small). 
+*/ +extern "C" int +thd_need_wait_for(const MYSQL_THD thd) +{ + rpl_group_info *rgi; + + if (!thd) + return false; + rgi= thd->rgi_slave; + if (!rgi) + return false; + return rgi->is_parallel_exec; +} + +/* + Used by InnoDB/XtraDB to report that one transaction THD is about to go to + wait for a transactional lock held by another transactions OTHER_THD. + + This is used for parallel replication, where transactions are required to + commit in the same order on the slave as they did on the master. If the + transactions on the slave encounters lock conflicts on the slave that did + not exist on the master, this can cause deadlocks. + + Normally, such conflicts will not occur, because the same conflict would + have prevented the two transactions from committing in parallel on the + master, thus preventing them from running in parallel on the slave in the + first place. However, it is possible in case when the optimizer chooses a + different plan on the slave than on the master (eg. table scan instead of + index scan). + + InnoDB/XtraDB reports lock waits using this call. If a lock wait causes a + deadlock with the pre-determined commit order, we kill the later transaction, + and later re-try it, to resolve the deadlock. + + This call need only receive reports about waits for locks that will remain + until the holding transaction commits. InnoDB/XtraDB auto-increment locks + are released earlier, and so need not be reported. (Such false positives are + not harmful, but could lead to unnecessary kill and retry, so best avoided). 
+*/ +extern "C" void +thd_report_wait_for(const MYSQL_THD thd, MYSQL_THD other_thd) +{ + rpl_group_info *rgi; + rpl_group_info *other_rgi; + + if (!thd || !other_thd) + return; + rgi= thd->rgi_slave; + other_rgi= other_thd->rgi_slave; + if (!rgi || !other_rgi) + return; + if (!rgi->is_parallel_exec) + return; + if (rgi->rli != other_rgi->rli) + return; + if (!rgi->gtid_sub_id || !other_rgi->gtid_sub_id) + return; + if (rgi->current_gtid.domain_id != other_rgi->current_gtid.domain_id) + return; + if (rgi->gtid_sub_id > other_rgi->gtid_sub_id) + return; + /* + This transaction is about to wait for another transaction that is required + by replication binlog order to commit after. This would cause a deadlock. + + So send a kill to the other transaction, with a temporary error; this will + cause replication to rollback (and later re-try) the other transaction, + releasing the lock for this transaction so replication can proceed. + */ + other_rgi->killed_for_retry= true; + mysql_mutex_lock(&other_thd->LOCK_thd_data); + other_thd->awake(KILL_CONNECTION); + mysql_mutex_unlock(&other_thd->LOCK_thd_data); +} + +/* + This function is called from InnoDB/XtraDB to check if the commit order of + two transactions has already been decided by the upper layer. This happens + in parallel replication, where the commit order is forced to be the same on + the slave as it was originally on the master. + + If this function returns false, it means that such commit order will be + enforced. This allows the storage engine to optionally omit gap lock waits + or similar measures that would otherwise be needed to ensure that + transactions would be serialised in a way that would cause a commit order + that is correct for binlogging for statement-based replication. + + Since transactions are only run in parallel on the slave if they ran without + lock conflicts on the master, normally no lock conflicts on the slave happen + during parallel replication. 
However, there are a couple of corner cases + where it can happen, like these secondary-index operations: + + T1: INSERT INTO t1 VALUES (7, NULL); + T2: DELETE FROM t1 WHERE b <= 3; + + T1: UPDATE t1 SET secondary=NULL WHERE primary=1 + T2: DELETE t1 WHERE secondary <= 3 + + The DELETE takes a gap lock that can block the INSERT/UPDATE, but the row + locks set by INSERT/UPDATE do not block the DELETE. Thus, the execution + order of the transactions determine whether a lock conflict occurs or + not. Thus a lock conflict can occur on the slave where it did not on the + master. + + If this function returns true, normal locking should be done as required by + the binlogging and transaction isolation level in effect. But if it returns + false, the correct order will be enforced anyway, and InnoDB/XtraDB can + avoid taking the gap lock, preventing the lock conflict. + + Calling this function is just an optimisation to avoid unnecessary + deadlocks. If it was not used, a gap lock would be set that could eventually + cause a deadlock; the deadlock would be caught by thd_report_wait_for() and + the transaction T2 killed and rolled back (and later re-tried). +*/ +extern "C" int +thd_need_ordering_with(const MYSQL_THD thd, const MYSQL_THD other_thd) +{ + rpl_group_info *rgi, *other_rgi; + + if (!thd || !other_thd) + return 1; + rgi= thd->rgi_slave; + other_rgi= other_thd->rgi_slave; + if (!rgi || !other_rgi) + return 1; + if (!rgi->is_parallel_exec) + return 1; + if (rgi->rli != other_rgi->rli) + return 1; + if (rgi->current_gtid.domain_id != other_rgi->current_gtid.domain_id) + return 1; + if (!rgi->commit_id || rgi->commit_id != other_rgi->commit_id) + return 1; + /* + These two threads are doing parallel replication within the same + replication domain. Their commit order is already fixed, so we do not need + gap locks or similar to otherwise enforce ordering (and in fact such locks + could lead to unnecessary deadlocks and transaction retry). 
+ */ + return 0; +} + + +/* + If the storage engine detects a deadlock, and needs to choose a victim + transaction to roll back, it can call this function to ask the upper + server layer for which of two possible transactions is prefered to be + aborted and rolled back. + + In parallel replication, if two transactions are running in parallel and + one is fixed to commit before the other, then the one that commits later + will be prefered as the victim - chosing the early transaction as a victim + will not resolve the deadlock anyway, as the later transaction still needs + to wait for the earlier to commit. + + Otherwise, a transaction that uses only transactional tables, and can thus + be safely rolled back, will be prefered as a deadlock victim over a + transaction that also modified non-transactional (eg. MyISAM) tables. + + The return value is -1 if the first transaction is prefered as a deadlock + victim, 1 if the second transaction is prefered, or 0 for no preference (in + which case the storage engine can make the choice as it prefers). +*/ +extern "C" int +thd_deadlock_victim_preference(const MYSQL_THD thd1, const MYSQL_THD thd2) +{ + rpl_group_info *rgi1, *rgi2; + bool nontrans1, nontrans2; + + if (!thd1 || !thd2) + return 0; + + /* + If the transactions are participating in the same replication domain in + parallel replication, then request to select the one that will commit + later (in the fixed commit order from the master) as the deadlock victim. + */ + rgi1= thd1->rgi_slave; + rgi2= thd2->rgi_slave; + if (rgi1 && rgi2 && + rgi1->is_parallel_exec && + rgi1->rli == rgi2->rli && + rgi1->current_gtid.domain_id == rgi2->current_gtid.domain_id) + return rgi1->gtid_sub_id < rgi2->gtid_sub_id ? 1 : -1; + + /* + If one transaction has modified non-transactional tables (so that it + cannot be safely rolled back), and the other has not, then prefer to + select the purely transactional one as the victim. 
+ */ + nontrans1= thd1->transaction.all.modified_non_trans_table; + nontrans2= thd2->transaction.all.modified_non_trans_table; + if (nontrans1 && !nontrans2) + return 1; + else if (!nontrans1 && nontrans2) + return -1; + + /* No preferences, let the storage engine decide. */ + return 0; +} + + extern "C" int thd_non_transactional_update(const MYSQL_THD thd) { return(thd->transaction.all.modified_non_trans_table); @@ -6700,6 +6912,7 @@ wait_for_commit::unregister_wait_for_prior_commit2() this->waitee= NULL; } } + wakeup_error= 0; mysql_mutex_unlock(&LOCK_wait_commit); } diff --git a/sql/sql_class.h b/sql/sql_class.h index b64006d7b71..98a76dce1fb 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -721,6 +721,7 @@ typedef struct system_status_var ulong filesort_range_count_; ulong filesort_rows_; ulong filesort_scan_count_; + ulong filesort_pq_sorts_; /* Prepared statements and binary protocol */ ulong com_stmt_prepare; ulong com_stmt_reprepare; @@ -774,6 +775,13 @@ typedef struct system_status_var #define last_system_status_var questions #define last_cleared_system_status_var memory_used +/* + Global status variables +*/ + +extern ulong feature_files_opened_with_delayed_keys; + + void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var); void add_diff_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var, @@ -1374,7 +1382,8 @@ enum enum_thread_type SYSTEM_THREAD_NDBCLUSTER_BINLOG= 8, SYSTEM_THREAD_EVENT_SCHEDULER= 16, SYSTEM_THREAD_EVENT_WORKER= 32, - SYSTEM_THREAD_BINLOG_BACKGROUND= 64 + SYSTEM_THREAD_BINLOG_BACKGROUND= 64, + SYSTEM_THREAD_SLAVE_INIT= 128, }; inline char const * @@ -1757,6 +1766,8 @@ struct wait_for_commit { if (waitee) unregister_wait_for_prior_commit2(); + else + wakeup_error= 0; } /* Remove a waiter from the list in the waitee. Used to unregister a wait. @@ -2514,8 +2525,6 @@ public: /** Idle instrumentation state. 
*/ PSI_idle_locker_state m_idle_state; #endif /* HAVE_PSI_IDLE_INTERFACE */ - /** True if the server code is IDLE for this connection. */ - bool m_server_idle; /* Id of current query. Statement can be reused to execute several queries @@ -2948,6 +2957,11 @@ public: // End implementation of MDL_context_owner interface. + inline bool use_cond_push(handler *file) + { + return (variables.optimizer_switch & OPTIMIZER_SWITCH_ENGINE_CONDITION_PUSHDOWN) + || (file->ha_table_flags() & HA_MUST_USE_TABLE_CONDITION_PUSHDOWN); + } inline bool is_strict_mode() const { return (bool) (variables.sql_mode & (MODE_STRICT_TRANS_TABLES | diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index 04c03e0c23e..08605bf2c85 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -916,7 +916,8 @@ multi_delete::initialize_tables(JOIN *join) walk= delete_tables; - for (JOIN_TAB *tab= first_linear_tab(join, WITH_CONST_TABLES); + for (JOIN_TAB *tab= first_linear_tab(join, WITHOUT_BUSH_ROOTS, + WITH_CONST_TABLES); tab; tab= next_linear_tab(join, tab, WITHOUT_BUSH_ROOTS)) { diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index a910ed6290f..8e8dbfc71d4 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -465,6 +465,8 @@ bool mysql_derived_merge(THD *thd, LEX *lex, TABLE_LIST *derived) } } + if (!derived->merged_for_insert) + dt_select->first_cond_optimization= FALSE; // consider it optimized exit_merge: if (arena) thd->restore_active_arena(arena, &backup); @@ -614,6 +616,7 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived) SELECT_LEX_UNIT *unit= derived->get_unit(); DBUG_ENTER("mysql_derived_prepare"); bool res= FALSE; + DBUG_PRINT("enter", ("unit 0x%lx", (ulong) unit)); // Skip already prepared views/DT if (!unit || unit->prepared || @@ -623,9 +626,6 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived) thd->lex->sql_command == SQLCOM_DELETE_MULTI)))) DBUG_RETURN(FALSE); - Query_arena *arena, backup; - arena= 
thd->activate_stmt_arena_if_needed(&backup); - SELECT_LEX *first_select= unit->first_select(); /* prevent name resolving out of derived table */ @@ -743,8 +743,6 @@ exit: if (derived->outer_join) table->maybe_null= 1; } - if (arena) - thd->restore_active_arena(arena, &backup); DBUG_RETURN(res); } diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 2097f636dd2..12854124b02 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -1,6 +1,6 @@ /* Copyright (c) 2000, 2013, Oracle and/or its affiliates. - Copyright (c) 2009, 2013, Monty Program Ab. + Copyright (c) 2010, 2014, SkySQL Ab. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -4161,15 +4161,14 @@ select_create::binlog_show_create_table(TABLE **tables, uint count) { /* Note 1: In RBR mode, we generate a CREATE TABLE statement for the - created table by calling store_create_info() (behaves as SHOW - CREATE TABLE). In the event of an error, nothing should be - written to the binary log, even if the table is non-transactional; - therefore we pretend that the generated CREATE TABLE statement is - for a transactional table. The event will then be put in the - transaction cache, and any subsequent events (e.g., table-map - events and binrow events) will also be put there. We can then use - ha_autocommit_or_rollback() to either throw away the entire - kaboodle of events, or write them to the binary log. + created table by calling show_create_table(). In the event of an error, + nothing should be written to the binary log, even if the table is + non-transactional; therefore we pretend that the generated CREATE TABLE + statement is for a transactional table. The event will then be put in the + transaction cache, and any subsequent events (e.g., table-map events and + binrow events) will also be put there. 
We can then use + ha_autocommit_or_rollback() to either throw away the entire kaboodle of + events, or write them to the binary log. We write the CREATE TABLE statement here and not in prepare() since there potentially are sub-selects or accesses to information @@ -4188,12 +4187,9 @@ select_create::binlog_show_create_table(TABLE **tables, uint count) tmp_table_list.table = *tables; query.length(0); // Have to zero it since constructor doesn't - result= store_create_info(thd, &tmp_table_list, &query, create_info, - /* show_database */ TRUE, - MY_TEST(create_info->org_options & - HA_LEX_CREATE_REPLACE) || - create_info->table_was_deleted); - DBUG_ASSERT(result == 0); /* store_create_info() always return 0 */ + result= show_create_table(thd, &tmp_table_list, &query, create_info, + WITH_DB_NAME); + DBUG_ASSERT(result == 0); /* show_create_table() always return 0 */ #ifdef WITH_WSREP if (WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open()) diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 26cef090b83..3bd23692487 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -1,5 +1,5 @@ -/* Copyright (c) 2000, 2013, Oracle and/or its affiliates. - Copyright (c) 2009, 2013, Monty Program Ab. +/* Copyright (c) 2000, 2014, Oracle and/or its affiliates. + Copyright (c) 2009, 2014, Monty Program Ab. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -30,7 +30,7 @@ #include "sp.h" #include "sql_select.h" -static int lex_one_token(void *arg, THD *thd); +static int lex_one_token(YYSTYPE *yylval, THD *thd); /* We are using pointer to this variable for distinguishing between assignment @@ -958,15 +958,17 @@ bool consume_comment(Lex_input_stream *lip, int remaining_recursions_permitted) /* MYSQLlex remember the following states from the following MYSQLlex() + @param yylval [out] semantic value of the token being parsed (yylval) + @param thd THD + - MY_LEX_EOQ Found end of query - MY_LEX_OPERATOR_OR_IDENT Last state was an ident, text or number (which can't be followed by a signed number) */ -int MYSQLlex(void *arg, THD *thd) +int MYSQLlex(YYSTYPE *yylval, THD *thd) { Lex_input_stream *lip= & thd->m_parser_state->m_lip; - YYSTYPE *yylval=(YYSTYPE*) arg; int token; if (lip->lookahead_token >= 0) @@ -983,7 +985,7 @@ int MYSQLlex(void *arg, THD *thd) return token; } - token= lex_one_token(arg, thd); + token= lex_one_token(yylval, thd); switch(token) { case WITH: @@ -994,7 +996,7 @@ int MYSQLlex(void *arg, THD *thd) to transform the grammar into a LALR(1) grammar, which sql_yacc.yy can process. 
*/ - token= lex_one_token(arg, thd); + token= lex_one_token(yylval, thd); switch(token) { case CUBE_SYM: lip->m_digest_psi= MYSQL_ADD_TOKEN(lip->m_digest_psi, WITH_CUBE_SYM, @@ -1023,7 +1025,7 @@ int MYSQLlex(void *arg, THD *thd) return token; } -int lex_one_token(void *arg, THD *thd) +static int lex_one_token(YYSTYPE *yylval, THD *thd) { reg1 uchar c; bool comment_closed; @@ -1032,7 +1034,6 @@ int lex_one_token(void *arg, THD *thd) enum my_lex_states state; Lex_input_stream *lip= & thd->m_parser_state->m_lip; LEX *lex= thd->lex; - YYSTYPE *yylval=(YYSTYPE*) arg; CHARSET_INFO *const cs= thd->charset(); const uchar *const state_map= cs->state_map; const uchar *const ident_map= cs->ident_map; @@ -3292,7 +3293,7 @@ static void fix_prepare_info_in_table_list(THD *thd, TABLE_LIST *tbl) { for (; tbl; tbl= tbl->next_local) { - if (tbl->on_expr) + if (tbl->on_expr && !tbl->prep_on_expr) { thd->check_and_register_item_tree(&tbl->prep_on_expr, &tbl->on_expr); tbl->on_expr= tbl->on_expr->copy_andor_structure(thd); diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 76288e94c75..e15901a9c54 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -1,5 +1,5 @@ -/* Copyright (c) 2000, 2013, Oracle and/or its affiliates. - Copyright (c) 2010, 2013, Monty Program Ab. +/* Copyright (c) 2000, 2014, Oracle and/or its affiliates. + Copyright (c) 2010, 2014, Monty Program Ab. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -2911,7 +2911,7 @@ extern void lex_start(THD *thd); extern void lex_end(LEX *lex); void end_lex_with_single_table(THD *thd, TABLE *table, LEX *old_lex); int init_lex_with_single_table(THD *thd, TABLE *table, LEX *lex); -extern int MYSQLlex(void *arg, THD *thd); +extern int MYSQLlex(union YYSTYPE *yylval, THD *thd); extern void trim_whitespace(CHARSET_INFO *cs, LEX_STRING *str); diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 4e19d648cfa..0d1a3ad6ea7 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -1,5 +1,5 @@ /* Copyright (c) 2000, 2013, Oracle and/or its affiliates. - Copyright (c) 2008, 2013, Monty Program Ab + Copyright (c) 2008, 2014, SkySQL Ab. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -951,9 +951,8 @@ bool do_command(THD *thd) */ DEBUG_SYNC(thd, "before_do_command_net_read"); - thd->m_server_idle= TRUE; - packet_length= my_net_read(net); - thd->m_server_idle= FALSE; + packet_length= my_net_read_packet(net, 1); + #ifdef WITH_WSREP if (WSREP(thd)) { mysql_mutex_lock(&thd->LOCK_wsrep_thd); @@ -3244,7 +3243,11 @@ case SQLCOM_PREPARE: goto end_with_restore_list; } + /* Copy temporarily the statement flags to thd for lock_table_names() */ + uint save_thd_create_info_options= thd->lex->create_info.options; + thd->lex->create_info.options|= create_info.options; res= open_and_lock_tables(thd, lex->query_tables, TRUE, 0); + thd->lex->create_info.options= save_thd_create_info_options; if (res) { /* Got error or warning. Set res to 1 if error */ @@ -7962,7 +7965,7 @@ static uint kill_threads_for_user(THD *thd, LEX_USER *user, I_List_iterator<THD> it(threads); while ((tmp=it++)) { - if (tmp->get_command() == COM_DAEMON) + if (!tmp->security_ctx->user) continue; /* Check that hostname (if given) and user name matches. 
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index 1ce952b9030..18e49d878cd 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -1,5 +1,5 @@ -/* Copyright (c) 2005, 2013, Oracle and/or its affiliates. - Copyright (c) 2009, 2013, Monty Program Ab. +/* Copyright (c) 2005, 2014, Oracle and/or its affiliates. + Copyright (c) 2009, 2014, SkySQL Ab. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -3167,19 +3167,28 @@ uint32 get_partition_id_cols_list_for_endpoint(partition_info *part_info, uint num_columns= part_info->part_field_list.elements; uint list_index; uint min_list_index= 0; + int cmp; + /* Notice that max_list_index = last_index + 1 here! */ uint max_list_index= part_info->num_list_values; DBUG_ENTER("get_partition_id_cols_list_for_endpoint"); /* Find the matching partition (including taking endpoint into account). */ do { - /* Midpoint, adjusted down, so it can never be > last index. */ + /* Midpoint, adjusted down, so it can never be >= max_list_index. */ list_index= (max_list_index + min_list_index) >> 1; - if (cmp_rec_and_tuple_prune(list_col_array + list_index*num_columns, - nparts, left_endpoint, include_endpoint) > 0) + cmp= cmp_rec_and_tuple_prune(list_col_array + list_index*num_columns, + nparts, left_endpoint, include_endpoint); + if (cmp > 0) + { min_list_index= list_index + 1; + } else + { max_list_index= list_index; + if (cmp == 0) + break; + } } while (max_list_index > min_list_index); list_index= max_list_index; @@ -3196,12 +3205,10 @@ uint32 get_partition_id_cols_list_for_endpoint(partition_info *part_info, nparts, left_endpoint, include_endpoint))); - if (!left_endpoint) - { - /* Set the end after this list tuple if not already after the last. */ - if (list_index < part_info->num_parts) - list_index++; - } + /* Include the right endpoint if not already passed end of array. 
*/ + if (!left_endpoint && include_endpoint && cmp == 0 && + list_index < part_info->num_list_values) + list_index++; DBUG_RETURN(list_index); } @@ -7573,15 +7580,13 @@ static int cmp_rec_and_tuple_prune(part_column_list_val *val, field= val->part_info->part_field_array + n_vals_in_rec; if (!(*field)) { - /* - Full match, if right endpoint and not including the endpoint, - (rec < part) return lesser. - */ - if (!is_left_endpoint && !include_endpoint) - return -4; + /* Full match. Only equal if including endpoint. */ + if (include_endpoint) + return 0; - /* Otherwise they are equal! */ - return 0; + if (is_left_endpoint) + return +4; /* Start of range, part_tuple < rec, return higher. */ + return -4; /* End of range, rec < part_tupe, return lesser. */ } /* The prefix is equal and there are more partition columns to compare. diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index 1348ebec3c8..fb9bb05bcb4 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -1040,7 +1040,7 @@ static st_plugin_int *plugin_insert_or_reuse(struct st_plugin_int *plugin) static bool plugin_add(MEM_ROOT *tmp_root, const LEX_STRING *name, LEX_STRING *dl, int report) { - struct st_plugin_int tmp; + struct st_plugin_int tmp, *maybe_dupe; struct st_maria_plugin *plugin; uint oks= 0, errs= 0, dupes= 0; DBUG_ENTER("plugin_add"); @@ -1070,8 +1070,14 @@ static bool plugin_add(MEM_ROOT *tmp_root, (const uchar *)tmp.name.str, tmp.name.length)) continue; // plugin name doesn't match - if (!name->str && plugin_find_internal(&tmp.name, MYSQL_ANY_PLUGIN)) + if (!name->str && + (maybe_dupe= plugin_find_internal(&tmp.name, MYSQL_ANY_PLUGIN))) { + if (plugin->name != maybe_dupe->plugin->name) + { + report_error(report, ER_UDF_EXISTS, plugin->name); + DBUG_RETURN(TRUE); + } dupes++; continue; // already installed } @@ -1607,7 +1613,7 @@ int plugin_init(int *argc, char **argv, int flags) if (plugin_initialize(&tmp_root, plugin_ptr, argc, argv, !is_myisam && (flags & PLUGIN_INIT_SKIP_INITIALIZATION))) { 
- if (mandatory) + if (plugin_ptr->load_option == PLUGIN_FORCE) goto err_unlock; plugin_ptr->state= PLUGIN_IS_DISABLED; } @@ -3073,10 +3079,10 @@ void plugin_thdvar_init(THD *thd) { plugin_ref old_table_plugin= thd->variables.table_plugin; DBUG_ENTER("plugin_thdvar_init"); - + thd->variables.table_plugin= NULL; cleanup_variables(thd, &thd->variables); - + thd->variables= global_system_variables; thd->variables.table_plugin= NULL; @@ -3339,7 +3345,7 @@ bool sys_var_pluginvar::session_update(THD *thd, set_var *var) mysql_mutex_unlock(&LOCK_global_system_variables); plugin_var->update(thd, plugin_var, tgt, src); - + return false; } @@ -3763,7 +3769,7 @@ static int construct_options(MEM_ROOT *mem_root, struct st_plugin_int *tmp, if (opt->flags & PLUGIN_VAR_NOCMDOPT) continue; - optname= (char*) memdup_root(mem_root, v->key + 1, + optname= (char*) memdup_root(mem_root, v->key + 1, (optnamelen= v->name_len) + 1); } @@ -4013,7 +4019,7 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp, } DBUG_RETURN(0); - + err: if (tmp_backup) my_afree(tmp_backup); diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index eaed0660e46..8cd1c84b668 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -1,5 +1,5 @@ /* Copyright (c) 2000, 2013, Oracle and/or its affiliates. - Copyright (c) 2008, 2013, Monty Program Ab + Copyright (c) 2008, 2014, SkySQL Ab. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -3084,6 +3084,7 @@ int reset_slave(THD *thd, Master_info* mi) mi->clear_error(); mi->rli.clear_error(); mi->rli.clear_until_condition(); + mi->rli.slave_skip_counter= 0; // close master_info_file, relay_log_info_file, set mi->inited=rli->inited=0 end_master_info(mi); @@ -3531,6 +3532,7 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added) /* Clear the errors, for a clean start */ mi->rli.clear_error(); mi->rli.clear_until_condition(); + mi->rli.slave_skip_counter= 0; sql_print_information("'CHANGE MASTER TO executed'. " "Previous state master_host='%s', master_port='%u', master_log_file='%s', " diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 2e4227ed8a0..160ffe11abc 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -1441,7 +1441,8 @@ TODO: make view to decide if it is possible to write to WHERE directly or make S Perform the optimization on fields evaluation mentioned above for all on expressions. */ - for (JOIN_TAB *tab= first_linear_tab(this, WITHOUT_CONST_TABLES); tab; + JOIN_TAB *tab; + for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES); tab; tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS)) { if (*tab->on_expr_ref) @@ -1464,7 +1465,7 @@ TODO: make view to decide if it is possible to write to WHERE directly or make S Perform the optimization on fields evaliation mentioned above for all used ref items. 
*/ - for (JOIN_TAB *tab= first_linear_tab(this, WITHOUT_CONST_TABLES); tab; + for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES); tab; tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS)) { uint key_copy_index=0; @@ -2099,7 +2100,8 @@ bool JOIN::setup_subquery_caches() if (conds) conds= conds->transform(&Item::expr_cache_insert_transformer, (uchar*) thd); - for (JOIN_TAB *tab= first_linear_tab(this, WITHOUT_CONST_TABLES); + JOIN_TAB *tab; + for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES); tab; tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS)) { if (tab->select_cond) @@ -2261,7 +2263,8 @@ JOIN::reinit() /* need to reset ref access state (see join_read_key) */ if (join_tab) { - for (JOIN_TAB *tab= first_linear_tab(this, WITH_CONST_TABLES); tab; + JOIN_TAB *tab; + for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITH_CONST_TABLES); tab; tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS)) { tab->ref.key_err= TRUE; @@ -3120,8 +3123,9 @@ JOIN::destroy() { if (join_tab != tmp_join->join_tab) { - for (JOIN_TAB *tab= first_linear_tab(this, WITH_CONST_TABLES); tab; - tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS)) + JOIN_TAB *tab; + for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITH_CONST_TABLES); + tab; tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS)) { tab->cleanup(); } @@ -8186,14 +8190,24 @@ JOIN_TAB *next_top_level_tab(JOIN *join, JOIN_TAB *tab) } -JOIN_TAB *first_linear_tab(JOIN *join, enum enum_with_const_tables const_tbls) +JOIN_TAB *first_linear_tab(JOIN *join, + enum enum_with_bush_roots include_bush_roots, + enum enum_with_const_tables const_tbls) { JOIN_TAB *first= join->join_tab; if (const_tbls == WITHOUT_CONST_TABLES) first+= join->const_tables; - if (first < join->join_tab + join->top_join_tab_count) - return first; - return NULL; /* All tables were const tables */ + + if (first >= join->join_tab + join->top_join_tab_count) + return NULL; /* All are const tables */ + + if (first->bush_children && 
include_bush_roots == WITHOUT_BUSH_ROOTS) + { + /* This JOIN_TAB is a SJM nest; Start from first table in nest */ + return first->bush_children->start; + } + + return first; } @@ -9045,9 +9059,10 @@ inline void add_cond_and_fix(THD *thd, Item **e1, Item *e2) static void add_not_null_conds(JOIN *join) { + JOIN_TAB *tab; DBUG_ENTER("add_not_null_conds"); - for (JOIN_TAB *tab= first_linear_tab(join, WITHOUT_CONST_TABLES); + for (tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES); tab; tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS)) { @@ -9218,7 +9233,7 @@ make_outerjoin_info(JOIN *join) tab->table->pos_in_table_list being set. */ JOIN_TAB *tab; - for (tab= first_linear_tab(join, WITHOUT_CONST_TABLES); + for (tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES); tab; tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS)) { @@ -9230,7 +9245,7 @@ make_outerjoin_info(JOIN *join) } } - for (JOIN_TAB *tab= first_linear_tab(join, WITHOUT_CONST_TABLES); tab; + for (JOIN_TAB *tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES); tab; tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS)) { TABLE *table= tab->table; @@ -9530,11 +9545,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) if (tab->table) { tab->table->file->pushed_cond= NULL; - if (((thd->variables.optimizer_switch & - OPTIMIZER_SWITCH_ENGINE_CONDITION_PUSHDOWN) || - (tab->table->file->ha_table_flags() & - HA_MUST_USE_TABLE_CONDITION_PUSHDOWN)) && - !first_inner_tab) + if (thd->use_cond_push(tab->table->file) && !first_inner_tab) { COND *push_cond= make_cond_for_table(thd, tmp, current_map, current_map, @@ -9986,7 +9997,7 @@ bool generate_derived_keys(DYNAMIC_ARRAY *keyuse_array) void JOIN::drop_unused_derived_keys() { JOIN_TAB *tab; - for (tab= first_linear_tab(this, WITHOUT_CONST_TABLES); + for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES); tab; tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS)) { @@ -10674,7 +10685,7 @@ void 
check_join_cache_usage_for_tables(JOIN *join, ulonglong options, JOIN_TAB *tab; JOIN_TAB *prev_tab; - for (tab= first_linear_tab(join, WITHOUT_CONST_TABLES); + for (tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES); tab; tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS)) { @@ -10682,7 +10693,7 @@ void check_join_cache_usage_for_tables(JOIN *join, ulonglong options, } uint idx= join->const_tables; - for (tab= first_linear_tab(join, WITHOUT_CONST_TABLES); + for (tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES); tab; tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS)) { @@ -10856,7 +10867,8 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after) tab->partial_join_cardinality= 1; JOIN_TAB *prev_tab= NULL; - for (tab= first_linear_tab(join, WITHOUT_CONST_TABLES), i= join->const_tables; + i= join->const_tables; + for (tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES); tab; prev_tab=tab, tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS)) { @@ -10881,7 +10893,7 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after) check_join_cache_usage_for_tables(join, options, no_jbuf_after); JOIN_TAB *first_tab; - for (tab= first_tab= first_linear_tab(join, WITHOUT_CONST_TABLES); + for (tab= first_tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES); tab; tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS)) { @@ -11575,7 +11587,8 @@ void JOIN::cleanup(bool full) } if (full) { - JOIN_TAB *sort_tab= first_linear_tab(this, WITHOUT_CONST_TABLES); + JOIN_TAB *sort_tab= first_linear_tab(this, WITH_BUSH_ROOTS, + WITHOUT_CONST_TABLES); if (pre_sort_join_tab) { if (sort_tab && sort_tab->select == pre_sort_join_tab->select) @@ -11622,7 +11635,7 @@ void JOIN::cleanup(bool full) } else { - for (tab= first_linear_tab(this, WITH_CONST_TABLES); tab; + for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITH_CONST_TABLES); tab; tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS)) { if (tab->table) @@ -11784,7 
+11797,9 @@ only_eq_ref_tables(JOIN *join,ORDER *order,table_map tables) static void update_depend_map(JOIN *join) { - for (JOIN_TAB *join_tab= first_linear_tab(join, WITH_CONST_TABLES); join_tab; + JOIN_TAB *join_tab; + for (join_tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITH_CONST_TABLES); + join_tab; join_tab= next_linear_tab(join, join_tab, WITH_BUSH_ROOTS)) { TABLE_REF *ref= &join_tab->ref; @@ -23559,11 +23574,7 @@ int JOIN::save_explain_data_intern(Explain_query *output, bool need_tmp_table, { const COND *pushed_cond= tab->table->file->pushed_cond; - if (((thd->variables.optimizer_switch & - OPTIMIZER_SWITCH_ENGINE_CONDITION_PUSHDOWN) || - (tab->table->file->ha_table_flags() & - HA_MUST_USE_TABLE_CONDITION_PUSHDOWN)) && - pushed_cond) + if (thd->use_cond_push(tab->table->file) && pushed_cond) { eta->push_extra(ET_USING_WHERE_WITH_PUSHED_CONDITION); /* diff --git a/sql/sql_select.h b/sql/sql_select.h index dc86825e8e9..490d8c91a9e 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -1500,7 +1500,9 @@ private: enum enum_with_bush_roots { WITH_BUSH_ROOTS, WITHOUT_BUSH_ROOTS}; enum enum_with_const_tables { WITH_CONST_TABLES, WITHOUT_CONST_TABLES}; -JOIN_TAB *first_linear_tab(JOIN *join, enum enum_with_const_tables const_tbls); +JOIN_TAB *first_linear_tab(JOIN *join, + enum enum_with_bush_roots include_bush_roots, + enum enum_with_const_tables const_tbls); JOIN_TAB *next_linear_tab(JOIN* join, JOIN_TAB* tab, enum enum_with_bush_roots include_bush_roots); diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 026b767d5a3..16fd1807b34 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -120,8 +120,9 @@ static void get_cs_converted_string_value(THD *thd, bool use_hex); #endif -static void -append_algorithm(TABLE_LIST *table, String *buff); +static int show_create_view(THD *thd, TABLE_LIST *table, String *buff); + +static void append_algorithm(TABLE_LIST *table, String *buff); static COND * make_cond_for_info_schema(COND *cond, TABLE_LIST *table); @@ -1067,9 
+1068,8 @@ mysqld_show_create(THD *thd, TABLE_LIST *table_list) buffer.set_charset(table_list->view_creation_ctx->get_client_cs()); if ((table_list->view ? - view_store_create_info(thd, table_list, &buffer) : - store_create_info(thd, table_list, &buffer, NULL, - FALSE /* show_database */, FALSE))) + show_create_view(thd, table_list, &buffer) : + show_create_table(thd, table_list, &buffer, NULL, WITHOUT_DB_NAME))) goto exit; if (table_list->view) @@ -1523,13 +1523,34 @@ static bool get_field_default_value(THD *thd, Field *field, String *def_value, @param thd thread handler @param packet string to append @param opt list of options + @param check_options only print known options + @param rules list of known options */ static void append_create_options(THD *thd, String *packet, - engine_option_value *opt) + engine_option_value *opt, + bool check_options, + ha_create_table_option *rules) { + bool in_comment= false; for(; opt; opt= opt->next) { + if (check_options) + { + if (is_engine_option_known(opt, rules)) + { + if (in_comment) + packet->append(STRING_WITH_LEN(" */")); + in_comment= false; + } + else + { + if (!in_comment) + packet->append(STRING_WITH_LEN(" /*")); + in_comment= true; + } + } + DBUG_ASSERT(opt->value.str); packet->append(' '); append_identifier(thd, packet, opt->name.str, opt->name.length); @@ -1539,13 +1560,15 @@ static void append_create_options(THD *thd, String *packet, else packet->append(opt->value.str, opt->value.length); } + if (in_comment) + packet->append(STRING_WITH_LEN(" */")); } /* Build a CREATE TABLE statement for a table. SYNOPSIS - store_create_info() + show_create_table() thd The thread table_list A list containing one table to write statement for. @@ -1555,8 +1578,7 @@ static void append_create_options(THD *thd, String *packet, to tailor the format of the statement. Can be NULL, in which case only SQL_MODE is considered when building the statement. 
- show_database Add database name to table name - create_or_replace Use CREATE OR REPLACE syntax + with_db_name Add database name to table name NOTE Currently always return 0, but might return error code in the @@ -1566,9 +1588,9 @@ static void append_create_options(THD *thd, String *packet, 0 OK */ -int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, - HA_CREATE_INFO *create_info_arg, bool show_database, - bool create_or_replace) +int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet, + HA_CREATE_INFO *create_info_arg, + enum_with_db_name with_db_name) { List<Item> field_list; char tmp[MAX_FIELD_WIDTH], *for_str, buff[128], def_value_buf[MAX_FIELD_WIDTH]; @@ -1582,27 +1604,35 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, handler *file= table->file; TABLE_SHARE *share= table->s; HA_CREATE_INFO create_info; -#ifdef WITH_PARTITION_STORAGE_ENGINE - bool show_table_options= FALSE; -#endif /* WITH_PARTITION_STORAGE_ENGINE */ - bool foreign_db_mode= (thd->variables.sql_mode & (MODE_POSTGRESQL | - MODE_ORACLE | - MODE_MSSQL | - MODE_DB2 | - MODE_MAXDB | - MODE_ANSI)) != 0; - bool limited_mysql_mode= (thd->variables.sql_mode & (MODE_NO_FIELD_OPTIONS | - MODE_MYSQL323 | - MODE_MYSQL40)) != 0; + sql_mode_t sql_mode= thd->variables.sql_mode; + bool foreign_db_mode= sql_mode & (MODE_POSTGRESQL | MODE_ORACLE | + MODE_MSSQL | MODE_DB2 | + MODE_MAXDB | MODE_ANSI); + bool limited_mysql_mode= sql_mode & (MODE_NO_FIELD_OPTIONS | MODE_MYSQL323 | + MODE_MYSQL40); + bool show_table_options= !(sql_mode & MODE_NO_TABLE_OPTIONS) && + !foreign_db_mode; + bool check_options= !(sql_mode & MODE_IGNORE_BAD_TABLE_OPTIONS) && + !create_info_arg; + handlerton *hton; my_bitmap_map *old_map; int error= 0; - DBUG_ENTER("store_create_info"); + DBUG_ENTER("show_create_table"); DBUG_PRINT("enter",("table: %s", table->s->table_name.str)); +#ifdef WITH_PARTITION_STORAGE_ENGINE + if (table->part_info) + hton= 
table->part_info->default_engine_type; + else +#endif + hton= file->ht; + restore_record(table, s->default_values); // Get empty record packet->append(STRING_WITH_LEN("CREATE ")); - if (create_or_replace) + if (create_info_arg && + (create_info_arg->org_options & HA_LEX_CREATE_REPLACE || + create_info_arg->table_was_deleted)) packet->append(STRING_WITH_LEN("OR REPLACE ")); if (share->tmp_table) packet->append(STRING_WITH_LEN("TEMPORARY ")); @@ -1629,7 +1659,7 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, avoid having to update gazillions of tests and result files, but it also saves a few bytes of the binary log. */ - if (show_database) + if (with_db_name == WITH_DB_NAME) { const LEX_STRING *const db= table_list->schema_table ? &INFORMATION_SCHEMA_NAME : &table->s->db; @@ -1668,8 +1698,7 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, field->sql_type(type); packet->append(type.ptr(), type.length(), system_charset_info); - if (field->has_charset() && - !(thd->variables.sql_mode & (MODE_MYSQL323 | MODE_MYSQL40))) + if (field->has_charset() && !(sql_mode & (MODE_MYSQL323 | MODE_MYSQL40))) { if (field->charset() != share->table_charset) { @@ -1726,7 +1755,7 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, if (field->unireg_check == Field::NEXT_NUMBER && - !(thd->variables.sql_mode & MODE_NO_FIELD_OPTIONS)) + !(sql_mode & MODE_NO_FIELD_OPTIONS)) packet->append(STRING_WITH_LEN(" AUTO_INCREMENT")); if (field->comment.length) @@ -1734,7 +1763,8 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, packet->append(STRING_WITH_LEN(" COMMENT ")); append_unescaped(packet, field->comment.str, field->comment.length); } - append_create_options(thd, packet, field->option_list); + append_create_options(thd, packet, field->option_list, check_options, + hton->field_options); } key_info= table->key_info; @@ -1801,7 +1831,8 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, 
String *packet, append_identifier(thd, packet, parser_name->str, parser_name->length); packet->append(STRING_WITH_LEN(" */ ")); } - append_create_options(thd, packet, key_info->option_list); + append_create_options(thd, packet, key_info->option_list, check_options, + hton->index_options); } /* @@ -1816,12 +1847,8 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, } packet->append(STRING_WITH_LEN("\n)")); - if (!(thd->variables.sql_mode & MODE_NO_TABLE_OPTIONS) && !foreign_db_mode) + if (show_table_options) { -#ifdef WITH_PARTITION_STORAGE_ENGINE - show_table_options= TRUE; -#endif /* WITH_PARTITION_STORAGE_ENGINE */ - /* IF check_create_info THEN add ENGINE only if it was used when creating the table @@ -1829,19 +1856,11 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, if (!create_info_arg || (create_info_arg->used_fields & HA_CREATE_USED_ENGINE)) { - if (thd->variables.sql_mode & (MODE_MYSQL323 | MODE_MYSQL40)) + if (sql_mode & (MODE_MYSQL323 | MODE_MYSQL40)) packet->append(STRING_WITH_LEN(" TYPE=")); else packet->append(STRING_WITH_LEN(" ENGINE=")); -#ifdef WITH_PARTITION_STORAGE_ENGINE - if (table->part_info) - packet->append(ha_resolve_storage_engine_name( - table->part_info->default_engine_type)); - else - packet->append(file->table_type()); -#else - packet->append(file->table_type()); -#endif + packet->append(hton_name(hton)); } /* @@ -1863,9 +1882,7 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, packet->append(buff, (uint) (end - buff)); } - if (share->table_charset && - !(thd->variables.sql_mode & MODE_MYSQL323) && - !(thd->variables.sql_mode & MODE_MYSQL40)) + if (share->table_charset && !(sql_mode & (MODE_MYSQL323 | MODE_MYSQL40))) { /* IF check_create_info @@ -1966,7 +1983,8 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, packet->append(STRING_WITH_LEN(" CONNECTION=")); append_unescaped(packet, share->connect_string.str, share->connect_string.length); 
} - append_create_options(thd, packet, share->option_list); + append_create_options(thd, packet, share->option_list, check_options, + hton->table_options); append_directory(thd, packet, "DATA", create_info.data_file_name); append_directory(thd, packet, "INDEX", create_info.index_file_name); } @@ -2118,8 +2136,7 @@ void append_definer(THD *thd, String *buffer, const LEX_STRING *definer_user, } -int -view_store_create_info(THD *thd, TABLE_LIST *table, String *buff) +static int show_create_view(THD *thd, TABLE_LIST *table, String *buff) { my_bool compact_view_name= TRUE; my_bool foreign_db_mode= (thd->variables.sql_mode & (MODE_POSTGRESQL | @@ -5177,7 +5194,7 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables, str.qs_append(STRING_WITH_LEN(" transactional=")); str.qs_append(ha_choice_values[(uint) share->transactional]); } - append_create_options(thd, &str, share->option_list); + append_create_options(thd, &str, share->option_list, false, 0); if (str.length()) table->field[19]->store(str.ptr()+1, str.length()-1, cs); @@ -8092,8 +8109,9 @@ bool get_schema_tables_result(JOIN *join, Warnings_only_error_handler err_handler; thd->push_internal_handler(&err_handler); old_proc_info= thd_proc_info(thd, "Filling schema table"); - - for (JOIN_TAB *tab= first_linear_tab(join, WITH_CONST_TABLES); + + JOIN_TAB *tab; + for (tab= first_linear_tab(join, WITHOUT_BUSH_ROOTS, WITH_CONST_TABLES); tab; tab= next_linear_tab(join, tab, WITHOUT_BUSH_ROOTS)) { diff --git a/sql/sql_show.h b/sql/sql_show.h index 708a77d74cd..ce7a9110cca 100644 --- a/sql/sql_show.h +++ b/sql/sql_show.h @@ -74,10 +74,10 @@ typedef struct system_status_var STATUS_VAR; #define IS_FILES_STATUS 36 #define IS_FILES_EXTRA 37 -int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, - HA_CREATE_INFO *create_info_arg, bool show_database, - bool create_or_replace); -int view_store_create_info(THD *thd, TABLE_LIST *table, String *buff); +typedef enum { WITHOUT_DB_NAME, WITH_DB_NAME } 
enum_with_db_name; +int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet, + HA_CREATE_INFO *create_info_arg, + enum_with_db_name with_db_name); int copy_event_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table); diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc index 67e7a9c304b..9acd3d98322 100644 --- a/sql/sql_statistics.cc +++ b/sql/sql_statistics.cc @@ -184,7 +184,7 @@ private: public: inline void init(THD *thd, Field * table_field); - inline void add(ha_rows rowno); + inline bool add(ha_rows rowno); inline void finish(ha_rows rows); inline void cleanup(); }; @@ -1550,6 +1550,7 @@ public: uint key_parts= table->actual_n_key_parts(key_info); empty= TRUE; prefixes= 0; + LINT_INIT(calc_state); is_single_comp_pk= FALSE; uint pk= table->s->primary_key; @@ -2218,9 +2219,10 @@ void Column_statistics_collected::init(THD *thd, Field *table_field) */ inline -void Column_statistics_collected::add(ha_rows rowno) +bool Column_statistics_collected::add(ha_rows rowno) { + bool err= 0; if (column->is_null()) nulls++; else @@ -2231,8 +2233,9 @@ void Column_statistics_collected::add(ha_rows rowno) if (max_value && column->update_max(max_value, rowno == nulls)) set_not_null(COLUMN_STAT_MAX_VALUE); if (count_distinct) - count_distinct->add(); + err= count_distinct->add(); } + return err; } @@ -2486,8 +2489,11 @@ int collect_statistics_for_table(THD *thd, TABLE *table) table_field= *field_ptr; if (!bitmap_is_set(table->read_set, table_field->field_index)) continue; - table_field->collected_stats->add(rows); + if ((rc= table_field->collected_stats->add(rows))) + break; } + if (rc) + break; rows++; } file->ha_rnd_end(); @@ -2517,7 +2523,7 @@ int collect_statistics_for_table(THD *thd, TABLE *table) else table_field->collected_stats->cleanup(); } -bitmap_clear_all(table->write_set); + bitmap_clear_all(table->write_set); if (!rc) { diff --git a/sql/sql_statistics.h b/sql/sql_statistics.h index 331e3559203..c399951b828 100644 --- 
a/sql/sql_statistics.h +++ b/sql/sql_statistics.h @@ -147,7 +147,7 @@ private: case SINGLE_PREC_HB: return (uint) (((uint8 *) values)[i]); case DOUBLE_PREC_HB: - return (uint) (((uint16 *) values)[i]); + return (uint) uint2korr(values + i * 2); } return 0; } @@ -214,7 +214,7 @@ public: ((uint8 *) values)[i]= (uint8) (val * prec_factor()); return; case DOUBLE_PREC_HB: - ((uint16 *) values)[i]= (uint16) (val * prec_factor()); + int2store(values + i * 2, val * prec_factor()); return; } } @@ -226,7 +226,7 @@ public: ((uint8 *) values)[i]= ((uint8 *) values)[i-1]; return; case DOUBLE_PREC_HB: - ((uint16 *) values)[i]= ((uint16 *) values)[i-1]; + int2store(values + i * 2, uint2korr(values + i * 2 - 2)); return; } } diff --git a/sql/sql_table.cc b/sql/sql_table.cc index a8e17cf3276..1a4f8fce158 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -1,6 +1,6 @@ /* Copyright (c) 2000, 2014, Oracle and/or its affiliates. - Copyright (c) 2010, 2014, Monty Program Ab. + Copyright (c) 2010, 2014, SkySQL Ab. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -75,6 +75,7 @@ static bool prepare_blob_field(THD *thd, Create_field *sql_field); static bool check_engine(THD *, const char *, const char *, HA_CREATE_INFO *); static int mysql_prepare_create_table(THD *, HA_CREATE_INFO *, Alter_info *, uint *, handler *, KEY **, uint *, int); +static uint blob_length_by_type(enum_field_types type); /** @brief Helper function for explain_filename @@ -3812,7 +3813,6 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, CHARSET_INFO *ft_key_charset=0; // for FULLTEXT for (uint column_nr=0 ; (column=cols++) ; column_nr++) { - uint length; Key_part_spec *dup_column; it.rewind(); @@ -3890,7 +3890,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, } if (f_is_geom(sql_field->pack_flag) && sql_field->geom_type == Field::GEOM_POINT) - column->length= 25; + column->length= MAX_LEN_GEOM_POINT_FIELD; if (!column->length) { my_error(ER_BLOB_KEY_WITHOUT_LENGTH, MYF(0), column->field_name.str); @@ -3956,30 +3956,31 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, key_part_info->fieldnr= field; key_part_info->offset= (uint16) sql_field->offset; key_part_info->key_type=sql_field->pack_flag; - length= sql_field->key_length; + uint key_part_length= sql_field->key_length; if (column->length) { if (f_is_blob(sql_field->pack_flag)) { - if ((length=column->length) > max_key_length || - length > file->max_key_part_length()) + key_part_length= MY_MIN(column->length, + blob_length_by_type(sql_field->sql_type) + * sql_field->charset->mbmaxlen); + if (key_part_length > max_key_length || + key_part_length > file->max_key_part_length()) { - length=MY_MIN(max_key_length, file->max_key_part_length()); + key_part_length= MY_MIN(max_key_length, file->max_key_part_length()); if (key->type == Key::MULTIPLE) { /* not a critical problem */ - char warn_buff[MYSQL_ERRMSG_SIZE]; 
- my_snprintf(warn_buff, sizeof(warn_buff), ER(ER_TOO_LONG_KEY), - length); - push_warning(thd, Sql_condition::WARN_LEVEL_WARN, - ER_TOO_LONG_KEY, warn_buff); + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + ER_TOO_LONG_KEY, ER(ER_TOO_LONG_KEY), + key_part_length); /* Align key length to multibyte char boundary */ - length-= length % sql_field->charset->mbmaxlen; + key_part_length-= key_part_length % sql_field->charset->mbmaxlen; } else { - my_error(ER_TOO_LONG_KEY,MYF(0),length); + my_error(ER_TOO_LONG_KEY, MYF(0), key_part_length); DBUG_RETURN(TRUE); } } @@ -3987,9 +3988,9 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, // Catch invalid use of partial keys else if (!f_is_geom(sql_field->pack_flag) && // is the key partial? - column->length != length && + column->length != key_part_length && // is prefix length bigger than field length? - (column->length > length || + (column->length > key_part_length || // can the field have a partial key? !Field::type_can_have_key_part (sql_field->sql_type) || // a packed field can't be used in a partial key @@ -3998,44 +3999,43 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, ((file->ha_table_flags() & HA_NO_PREFIX_CHAR_KEYS) && // and is this a 'unique' key? 
(key_info->flags & HA_NOSAME)))) - { + { my_message(ER_WRONG_SUB_KEY, ER(ER_WRONG_SUB_KEY), MYF(0)); DBUG_RETURN(TRUE); } else if (!(file->ha_table_flags() & HA_NO_PREFIX_CHAR_KEYS)) - length=column->length; + key_part_length= column->length; } - else if (length == 0 && (sql_field->flags & NOT_NULL_FLAG)) + else if (key_part_length == 0 && (sql_field->flags & NOT_NULL_FLAG)) { my_error(ER_WRONG_KEY_COLUMN, MYF(0), file->table_type(), column->field_name.str); DBUG_RETURN(TRUE); } - if (length > file->max_key_part_length() && key->type != Key::FULLTEXT) + if (key_part_length > file->max_key_part_length() && + key->type != Key::FULLTEXT) { - length= file->max_key_part_length(); + key_part_length= file->max_key_part_length(); if (key->type == Key::MULTIPLE) { /* not a critical problem */ - char warn_buff[MYSQL_ERRMSG_SIZE]; - my_snprintf(warn_buff, sizeof(warn_buff), ER(ER_TOO_LONG_KEY), - length); - push_warning(thd, Sql_condition::WARN_LEVEL_WARN, - ER_TOO_LONG_KEY, warn_buff); + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + ER_TOO_LONG_KEY, ER(ER_TOO_LONG_KEY), + key_part_length); /* Align key length to multibyte char boundary */ - length-= length % sql_field->charset->mbmaxlen; + key_part_length-= key_part_length % sql_field->charset->mbmaxlen; } else { - my_error(ER_TOO_LONG_KEY,MYF(0),length); + my_error(ER_TOO_LONG_KEY, MYF(0), key_part_length); DBUG_RETURN(TRUE); } } - key_part_info->length=(uint16) length; + key_part_info->length= (uint16) key_part_length; /* Use packed keys for long strings on the first column */ if (!((*db_options) & HA_OPTION_NO_PACK_KEYS) && !((create_info->table_options & HA_OPTION_NO_PACK_KEYS)) && - (length >= KEY_DEFAULT_PACK_LENGTH && + (key_part_length >= KEY_DEFAULT_PACK_LENGTH && (sql_field->sql_type == MYSQL_TYPE_STRING || sql_field->sql_type == MYSQL_TYPE_VARCHAR || sql_field->pack_flag & FIELDFLAG_BLOB))) @@ -4047,10 +4047,10 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, key_info->flags|= 
HA_PACK_KEY; } /* Check if the key segment is partial, set the key flag accordingly */ - if (length != sql_field->key_length) + if (key_part_length != sql_field->key_length) key_info->flags|= HA_KEY_HAS_PART_KEY_SEG; - key_length+=length; + key_length+= key_part_length; key_part_info++; /* Create the key name based on the first column (if not given) */ @@ -4971,7 +4971,7 @@ bool mysql_create_table(THD *thd, TABLE_LIST *create_table, const char *db= create_table->db; const char *table_name= create_table->table_name; bool is_trans= FALSE; - bool result= 0; + bool result; int create_table_mode; TABLE_LIST *pos_in_locked_tables= 0; MDL_ticket *mdl_ticket= 0; @@ -4979,8 +4979,16 @@ bool mysql_create_table(THD *thd, TABLE_LIST *create_table, DBUG_ASSERT(create_table == thd->lex->query_tables); + /* Copy temporarily the statement flags to thd for lock_table_names() */ + uint save_thd_create_info_options= thd->lex->create_info.options; + thd->lex->create_info.options|= create_info->options; + /* Open or obtain an exclusive metadata lock on table being created */ - if (open_and_lock_tables(thd, create_table, FALSE, 0)) + result= open_and_lock_tables(thd, create_table, FALSE, 0); + + thd->lex->create_info.options= save_thd_create_info_options; + + if (result) { /* is_error() may be 0 if table existed and we generated a warning */ DBUG_RETURN(thd->is_error()); @@ -5262,6 +5270,7 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, bool do_logging= FALSE; uint not_used; int create_res; + uint save_thd_create_info_options; DBUG_ENTER("mysql_create_like_table"); #ifdef WITH_WSREP @@ -5305,7 +5314,7 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, String query(buf, sizeof(buf), system_charset_info); query.length(0); // Have to zero it since constructor doesn't - (void) store_create_info(thd, &tbl, &query, NULL, TRUE, FALSE); + (void) show_create_table(thd, &tbl, &query, NULL, WITH_DB_NAME); WSREP_DEBUG("TMP TABLE: %s", query.ptr()); thd->wsrep_TOI_pre_query= 
query.ptr(); @@ -5330,7 +5339,14 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, Thus by holding both these locks we ensure that our statement is properly isolated from all concurrent operations which matter. */ - if (open_tables(thd, &thd->lex->query_tables, ¬_used, 0)) + + /* Copy temporarily the statement flags to thd for lock_table_names() */ + save_thd_create_info_options= thd->lex->create_info.options; + thd->lex->create_info.options|= create_info->options; + res= open_tables(thd, &thd->lex->query_tables, ¬_used, 0); + thd->lex->create_info.options= save_thd_create_info_options; + + if (res) { res= thd->is_error(); goto err; @@ -5489,7 +5505,7 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, table->open_strategy= TABLE_LIST::OPEN_NORMAL; /* - In order for store_create_info() to work we need to open + In order for show_create_table() to work we need to open destination table if it is not already open (i.e. if it has not existed before). We don't need acquire metadata lock in order to do this as we already hold exclusive @@ -5513,13 +5529,9 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, if (!table->view) { int result __attribute__((unused))= - store_create_info(thd, table, &query, - create_info, FALSE /* show_database */, - MY_TEST(create_info->org_options & - HA_LEX_CREATE_REPLACE) || - create_info->table_was_deleted); + show_create_table(thd, table, &query, create_info, WITHOUT_DB_NAME); - DBUG_ASSERT(result == 0); // store_create_info() always return 0 + DBUG_ASSERT(result == 0); // show_create_table() always return 0 do_logging= FALSE; if (write_bin_log(thd, TRUE, query.ptr(), query.length())) { diff --git a/sql/sql_table.h b/sql/sql_table.h index 444626e0363..2b383623873 100644 --- a/sql/sql_table.h +++ b/sql/sql_table.h @@ -117,6 +117,9 @@ enum enum_explain_filename_mode EXPLAIN_PARTITIONS_AS_COMMENT }; +/* Maximum length of GEOM_POINT Field */ +#define MAX_LEN_GEOM_POINT_FIELD 25 + /* depends on errmsg.txt Database 
`db`, Table `t` ... */ #define EXPLAIN_FILENAME_MAX_EXTRA_LENGTH 63 diff --git a/sql/sql_union.cc b/sql/sql_union.cc index 9d068e464f5..fe8bb7a6620 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -1,4 +1,5 @@ -/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. +/* Copyright (c) 2000, 2014, Oracle and/or its affiliates. + Copyright (c) 2010, 2014, SkySQL Ab. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/sql/sql_update.cc b/sql/sql_update.cc index f437bef15dd..db6fe42db1c 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -1435,11 +1435,15 @@ int mysql_multi_update_prepare(THD *thd) another table instance used by this statement which is going to be write-locked (for example, trigger to be invoked might try to update this table). + Last argument routine_modifies_data for read_lock_type_for_table() + is ignored, as prelocking placeholder will never be set here. */ + DBUG_ASSERT(tl->prelocking_placeholder == false); + thr_lock_type lock_type= read_lock_type_for_table(thd, lex, tl, true); if (using_lock_tables) - tl->lock_type= read_lock_type_for_table(thd, lex, tl); + tl->lock_type= lock_type; else - tl->set_lock_type(thd, read_lock_type_for_table(thd, lex, tl)); + tl->set_lock_type(thd, lock_type); tl->updating= 0; } } diff --git a/sql/sql_view.cc b/sql/sql_view.cc index a18193c6eb6..07169f299d7 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -400,9 +400,7 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, TABLE_LIST *tables= lex->query_tables; TABLE_LIST *tbl; SELECT_LEX *select_lex= &lex->select_lex; -#ifndef NO_EMBEDDED_ACCESS_CHECKS SELECT_LEX *sl; -#endif SELECT_LEX_UNIT *unit= &lex->unit; bool res= FALSE; DBUG_ENTER("mysql_create_view"); @@ -547,7 +545,8 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, } /* Check if the auto generated column names are conforming. 
*/ - make_valid_column_names(select_lex->item_list); + for (sl= select_lex; sl; sl= sl->next_select()) + make_valid_column_names(sl->item_list); if (check_duplicate_names(select_lex->item_list, 1)) { @@ -624,7 +623,7 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, if (!res) tdc_remove_table(thd, TDC_RT_REMOVE_ALL, view->db, view->table_name, false); - if (mysql_bin_log.is_open()) + if (!res && mysql_bin_log.is_open()) { String buff; const LEX_STRING command[3]= diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index a77d8478852..d6f4e2e94ee 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -76,7 +76,7 @@ int yylex(void *yylval, void *yythd); ulong val= *(F); \ if (my_yyoverflow((B), (D), &val)) \ { \ - yyerror(current_thd, (char*) (A)); \ + yyerror(thd, (char*) (A)); \ return 2; \ } \ else \ @@ -6296,7 +6296,8 @@ spatial_type: | GEOMETRYCOLLECTION { $$= Field::GEOM_GEOMETRYCOLLECTION; } | POINT_SYM { - Lex->length= (char*)"25"; + Lex->length= const_cast<char*>(STRINGIFY_ARG + (MAX_LEN_GEOM_POINT_FIELD)); $$= Field::GEOM_POINT; } | MULTIPOINT { $$= Field::GEOM_MULTIPOINT; } diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index 93dcc54bad9..1ce485bc53f 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2002, 2013, Oracle and/or its affiliates. +/* Copyright (c) 2002, 2014, Oracle and/or its affiliates. Copyright (c) 2012, 2014, SkySQL Ab. This program is free software; you can redistribute it and/or modify @@ -1080,6 +1080,17 @@ static Sys_var_keycache Sys_key_cache_age_threshold( BLOCK_SIZE(100), NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0), ON_UPDATE(change_keycache_param)); +static Sys_var_keycache Sys_key_cache_file_hash_size( + "key_cache_file_hash_size", + "Number of hash buckets for open and changed files. If you have a lot of MyISAM " + "files open you should increase this for faster flush of changes. 
A good " + "value is probably 1/10 of number of possible open MyISAM files.", + KEYCACHE_VAR(changed_blocks_hash_size), + CMD_LINE(REQUIRED_ARG, OPT_KEY_CACHE_CHANGED_BLOCKS_HASH_SIZE), + VALID_RANGE(128, 16384), DEFAULT(512), + BLOCK_SIZE(1), NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0), + ON_UPDATE(resize_keycache)); + static Sys_var_mybool Sys_large_files_support( "large_files_support", "Whether mysqld was compiled with options for large file support", @@ -3253,9 +3264,10 @@ static Sys_var_ulonglong Sys_tmp_table_size( static Sys_var_mybool Sys_timed_mutexes( "timed_mutexes", - "Specify whether to time mutexes (only InnoDB mutexes are currently " - "supported)", - GLOBAL_VAR(timed_mutexes), CMD_LINE(OPT_ARG), DEFAULT(0)); + "Specify whether to time mutexes. Deprecated, has no effect.", + GLOBAL_VAR(timed_mutexes), CMD_LINE(OPT_ARG), DEFAULT(0), + NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(NULL), ON_UPDATE(NULL), + DEPRECATED("")); static char *server_version_ptr; static Sys_var_charptr Sys_version( @@ -4260,11 +4272,11 @@ static Sys_var_uint Sys_slave_net_timeout( Return 0 + warning if it doesn't exist */ -uint Sys_var_multi_source_ulong:: -get_master_info_uint_value(THD *thd, ptrdiff_t offset) +ulong Sys_var_multi_source_ulong:: +get_master_info_ulong_value(THD *thd, ptrdiff_t offset) { Master_info *mi; - uint res= 0; // Default value + ulong res= 0; // Default value mysql_mutex_unlock(&LOCK_global_system_variables); mysql_mutex_lock(&LOCK_active_mi); mi= master_info_index-> @@ -4273,7 +4285,7 @@ get_master_info_uint_value(THD *thd, ptrdiff_t offset) if (mi) { mysql_mutex_lock(&mi->rli.data_lock); - res= *((uint*) (((uchar*) mi) + master_info_offset)); + res= *((ulong*) (((uchar*) mi) + master_info_offset)); mysql_mutex_unlock(&mi->rli.data_lock); } mysql_mutex_unlock(&LOCK_active_mi); @@ -4311,11 +4323,6 @@ bool update_multi_source_variable(sys_var *self_var, THD *thd, static bool update_slave_skip_counter(sys_var *self, THD *thd, Master_info *mi) { - if 
(mi->using_gtid != Master_info::USE_GTID_NO) - { - my_error(ER_SLAVE_SKIP_NOT_IN_GTID, MYF(0)); - return true; - } if (mi->rli.slave_running) { my_error(ER_SLAVE_MUST_STOP, MYF(0), mi->connection_name.length, diff --git a/sql/sys_vars.h b/sql/sys_vars.h index 495099b9c59..e7f9cf8a886 100644 --- a/sql/sys_vars.h +++ b/sql/sys_vars.h @@ -2012,7 +2012,7 @@ public: ptrdiff_t off, size_t size, CMD_LINE getopt, ptrdiff_t master_info_offset_arg, - uint min_val, uint max_val, uint def_val, + ulong min_val, ulong max_val, ulong def_val, uint block_size, on_multi_source_update_function on_update_func) :Sys_var_ulong(name_arg, comment, flag_args, off, size, @@ -2024,7 +2024,7 @@ public: } bool session_update(THD *thd, set_var *var) { - session_var(thd, uint)= (uint) (var->save_result.ulonglong_value); + session_var(thd, ulong)= (ulong) (var->save_result.ulonglong_value); /* Value should be moved to multi_master in on_update_func */ return false; } @@ -2039,9 +2039,9 @@ public: } uchar *session_value_ptr(THD *thd,LEX_STRING *base) { - uint *tmp, res; - tmp= (uint*) (((uchar*)&(thd->variables)) + offset); - res= get_master_info_uint_value(thd, master_info_offset); + ulong *tmp, res; + tmp= (ulong*) (((uchar*)&(thd->variables)) + offset); + res= get_master_info_ulong_value(thd, master_info_offset); *tmp= res; return (uchar*) tmp; } @@ -2049,7 +2049,7 @@ public: { return session_value_ptr(thd, base); } - uint get_master_info_uint_value(THD *thd, ptrdiff_t offset); + ulong get_master_info_ulong_value(THD *thd, ptrdiff_t offset); bool update_variable(THD *thd, Master_info *mi) { return update_multi_source_variable_func(this, thd, mi); diff --git a/sql/table.cc b/sql/table.cc index 6b3e0bcc4b7..aca80b73afd 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -1777,13 +1777,25 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, key_part= keyinfo->key_part; for (i=0 ; i < keyinfo->user_defined_key_parts ;i++) { - uint fieldnr= key_part[i].fieldnr; - if (!fieldnr || - 
share->field[fieldnr-1]->null_ptr || - share->field[fieldnr-1]->key_length() != - key_part[i].length) + DBUG_ASSERT(key_part[i].fieldnr > 0); + // Table field corresponding to the i'th key part. + Field *table_field= share->field[key_part[i].fieldnr - 1]; + + /* + If the key column is of NOT NULL BLOB type, then it + will definitly have key prefix. And if key part prefix size + is equal to the BLOB column max size, then we can promote + it to primary key. + */ + if (!table_field->real_maybe_null() && + table_field->type() == MYSQL_TYPE_BLOB && + table_field->field_length == key_part[i].length) + continue; + + if (table_field->real_maybe_null() || + table_field->key_length() != key_part[i].length) { - primary_key=MAX_KEY; // Can't be used + primary_key= MAX_KEY; // Can't be used break; } } @@ -4213,7 +4225,8 @@ bool TABLE_LIST::create_field_translation(THD *thd) while ((item= it++)) { - transl[field_count].name= item->name; + DBUG_ASSERT(item->name && item->name[0]); + transl[field_count].name= thd->strdup(item->name); transl[field_count++].item= item; } field_translation= transl; diff --git a/sql/table_cache.cc b/sql/table_cache.cc index 8b768240b4f..097f37d26d8 100644 --- a/sql/table_cache.cc +++ b/sql/table_cache.cc @@ -267,7 +267,7 @@ void tc_add_table(THD *thd, TABLE *table) TABLE_SHARE *purge_share= 0; TABLE_SHARE *share; TABLE *entry; - ulonglong purge_time; + ulonglong UNINIT_VAR(purge_time); TDC_iterator tdc_it; tdc_it.init(); diff --git a/sql/unireg.cc b/sql/unireg.cc index b7ac8b17c38..c60a13e5f44 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -211,6 +211,12 @@ LEX_CUSTRING build_frm_image(THD *thd, const char *table, filepos= frm.length; frm.length+= FRM_FORMINFO_SIZE; // forminfo frm.length+= packed_fields_length(create_fields); + + if (frm.length > FRM_MAX_SIZE) + { + my_error(ER_TABLE_DEFINITION_TOO_BIG, MYF(0), table); + DBUG_RETURN(frm); + } frm_ptr= (uchar*) my_malloc(frm.length, MYF(MY_WME | MY_ZEROFILL | MY_THREAD_SPECIFIC)); diff --git 
a/sql/unireg.h b/sql/unireg.h index 9b40b7b0779..5f133da674f 100644 --- a/sql/unireg.h +++ b/sql/unireg.h @@ -203,7 +203,7 @@ LEX_CUSTRING build_frm_image(THD *thd, const char *table, #define FRM_HEADER_SIZE 64 #define FRM_FORMINFO_SIZE 288 -#define FRM_MAX_SIZE (256*1024) +#define FRM_MAX_SIZE (512*1024) static inline bool is_binary_frm_header(uchar *head) { diff --git a/storage/connect/CMakeLists.txt b/storage/connect/CMakeLists.txt index f8db000a486..40d38fdc3b5 100644 --- a/storage/connect/CMakeLists.txt +++ b/storage/connect/CMakeLists.txt @@ -19,20 +19,20 @@ SET(CONNECT_PLUGIN_DYNAMIC "connect") SET(CONNECT_SOURCES ha_connect.cc connect.cc user_connect.cc mycat.cc fmdlex.c osutil.c plugutil.c rcmsg.c rcmsg.h -csort.cpp maputil.cpp plgdbutl.cpp -colblk.cpp reldef.cpp tabcol.cpp table.cpp -filamap.cpp filamdbf.cpp filamfix.cpp filamtxt.cpp filamvct.cpp -tabdos.cpp tabfix.cpp tabfmt.cpp tabmul.cpp tabsys.cpp tabvct.cpp +array.cpp blkfil.cpp colblk.cpp csort.cpp +filamap.cpp filamdbf.cpp filamfix.cpp filamtxt.cpp filamvct.cpp filamzip.cpp +filter.cpp maputil.cpp myutil.cpp plgdbutl.cpp reldef.cpp tabcol.cpp +tabdos.cpp tabfix.cpp tabfmt.cpp table.cpp tabmul.cpp taboccur.cpp +tabpivot.cpp tabsys.cpp tabtbl.cpp tabutil.cpp tabvct.cpp tabxcl.cpp valblk.cpp value.cpp xindex.cpp xobject.cpp -filamzip.cpp tabtbl.cpp myutil.cpp -tabutil.cpp tabxcl.cpp taboccur.cpp tabpivot.cpp -block.h catalog.h checklvl.h colblk.h connect.h csort.h engmsg.h -filamap.h filamdbf.h filamfix.h filamtxt.h filamvct.h filamzip.h -global.h ha_connect.h inihandl.h maputil.h msgid.h mycat.h myutil.h os.h -osutil.h plgcnx.h plgdbsem.h preparse.h reldef.h resource.h tabcol.h -tabdos.h tabfix.h tabfmt.h tabmul.h tabsys.h tabtbl.h tabvct.h -user_connect.h valblk.h value.h xindex.h xobject.h xtable.h -tabutil.h tabxcl.h taboccur.h tabpivot.h) + +array.h blkfil.h block.h catalog.h checklvl.h colblk.h connect.h csort.h +engmsg.h filamap.h filamdbf.h filamfix.h filamtxt.h filamvct.h filamzip.h +filter.h 
global.h ha_connect.h inihandl.h maputil.h msgid.h mycat.h myutil.h +os.h osutil.h plgcnx.h plgdbsem.h preparse.h reldef.h resource.h tabcol.h +tabdos.h tabfix.h tabfmt.h tabmul.h taboccur.h tabpivot.h tabsys.h +tabtbl.h tabutil.h tabvct.h tabxcl.h user_connect.h valblk.h value.h +xindex.h xobject.h xtable.h) # # Definitions that are shared for all OSes diff --git a/storage/connect/array.cpp b/storage/connect/array.cpp new file mode 100644 index 00000000000..6cd175a0f0a --- /dev/null +++ b/storage/connect/array.cpp @@ -0,0 +1,1167 @@ +/************* Array C++ Functions Source Code File (.CPP) *************/ +/* Name: ARRAY.CPP Version 2.3 */ +/* */ +/* (C) Copyright to the author Olivier BERTRAND 2005-2014 */ +/* */ +/* This file contains the XOBJECT derived class ARRAY functions. */ +/* ARRAY is used for elaborate type of processing, such as sorting */ +/* and dichotomic search (Find). This new version does not use sub */ +/* classes anymore for the different types but relies entirely on the */ +/* functionalities provided by the VALUE and VALBLK classes. */ +/* Currently the only supported types are STRING, SHORT, int, DATE, */ +/* TOKEN, DOUBLE, and Compressed Strings. */ +/***********************************************************************/ + +/***********************************************************************/ +/* Include relevant MariaDB header file. */ +/***********************************************************************/ +#include "my_global.h" +#include "sql_class.h" +//#include "sql_time.h" + +#if defined(WIN32) +//#include <windows.h> +#else // !WIN32 +#include <string.h> +#include <sys/types.h> +#include <sys/stat.h> +#endif // !WIN32 + +/***********************************************************************/ +/* Include required application header files */ +/* global.h is header containing all global Plug declarations. */ +/* plgdbsem.h is header containing the DB applic. declarations. 
*/ +/* xobject.h is header containing XOBJECT derived classes declares. */ +/***********************************************************************/ +#include "global.h" +#include "plgdbsem.h" +#include "xtable.h" +#include "array.h" +//#include "select.h" +//#include "query.h" +//#include "token.h" + +/***********************************************************************/ +/* Macro definitions. */ +/***********************************************************************/ +#if defined(_DEBUG) +#define ASSERT(B) assert(B); +#else +#define ASSERT(B) +#endif + +/***********************************************************************/ +/* Static variables. */ +/***********************************************************************/ +extern "C" int trace; + +/***********************************************************************/ +/* DB static external variables. */ +/***********************************************************************/ +extern MBLOCK Nmblk; /* Used to initialize MBLOCK's */ + +/***********************************************************************/ +/* External functions. */ +/***********************************************************************/ +BYTE OpBmp(PGLOBAL g, OPVAL opc); +void EncodeValue(int *lp, char *strp, int n); +PARRAY MakeValueArray(PGLOBAL g, PPARM pp); // avoid gcc warning + +/***********************************************************************/ +/* MakeValueArray: Makes a value array from a value list. */ +/***********************************************************************/ +PARRAY MakeValueArray(PGLOBAL g, PPARM pp) + { + int n, valtyp = 0; + size_t len = 0; + PARRAY par; + PPARM parmp; + + if (!pp) + return NULL; + + /*********************************************************************/ + /* New version with values coming in a list. 
*/ + /*********************************************************************/ + if ((valtyp = pp->Type) != TYPE_STRING) + len = 1; + + if (trace) + htrc("valtyp=%d len=%d\n", valtyp, len); + + /*********************************************************************/ + /* Firstly check the list and count the number of values in it. */ + /*********************************************************************/ + for (n = 0, parmp = pp; parmp; n++, parmp = parmp->Next) + if (parmp->Type != valtyp) { + sprintf(g->Message, MSG(BAD_PARAM_TYPE), "MakeValueArray", parmp->Type); + return NULL; + } else if (valtyp == TYPE_STRING) + len = MY_MAX(len, strlen((char*)parmp->Value)); + + /*********************************************************************/ + /* Make an array object with one block of the proper size. */ + /*********************************************************************/ + par = new(g) ARRAY(g, valtyp, n, (int)len); + + if (par->GetResultType() == TYPE_ERROR) + return NULL; // Memory allocation error in ARRAY + + /*********************************************************************/ + /* All is right now, fill the array block. */ + /*********************************************************************/ + for (parmp = pp; parmp; parmp = parmp->Next) + switch (valtyp) { + case TYPE_STRING: + par->AddValue(g, (PSZ)parmp->Value); + break; + case TYPE_SHORT: + par->AddValue(g, *(short*)parmp->Value); + break; + case TYPE_INT: + par->AddValue(g, *(int*)parmp->Value); + break; + case TYPE_DOUBLE: + par->AddValue(g, *(double*)parmp->Value); + break; + case TYPE_PCHAR: + par->AddValue(g, parmp->Value); + break; + } // endswitch valtyp + + /*********************************************************************/ + /* Send back resulting array. 
*/ + /*********************************************************************/ + return par; + } // end of MakeValueArray + +/* -------------------------- Class ARRAY ---------------------------- */ + +/***********************************************************************/ +/* ARRAY public constructor. */ +/***********************************************************************/ +ARRAY::ARRAY(PGLOBAL g, int type, int size, int length, int prec) + : CSORT(FALSE) + { + Nval = 0; + Ndif = 0; + Bot = 0; + Top = 0; + Size = size; + Type = type; + Xsize = -1; + Len = 1; + + switch ((Type = type)) { + case TYPE_STRING: + Len = length; + break; + case TYPE_SHORT: + case TYPE_INT: + case TYPE_DOUBLE: + case TYPE_PCHAR: + break; +#if 0 + case TYPE_TOKEN: + break; + case TYPE_LIST: + Len = 0; + prec = length; + break; +#endif // 0 + default: // This is illegal an causes an ill formed array building + sprintf(g->Message, MSG(BAD_ARRAY_TYPE), type); + Type = TYPE_ERROR; + return; + } // endswitch type + + Valblk = new(g) MBVALS; + + if (!(Vblp = Valblk->Allocate(g, Type, Len, prec, Size))) + Type = TYPE_ERROR; + else if (!Valblk->GetMemp() && Type != TYPE_LIST) + // The error message was built by PlgDBalloc + Type = TYPE_ERROR; + else if (type != TYPE_PCHAR) + Value = AllocateValue(g, type, Len, prec, NULL); + + Constant = TRUE; + } // end of ARRAY constructor + +#if 0 +/***********************************************************************/ +/* ARRAY public constructor from a QUERY. 
*/ +/***********************************************************************/ +ARRAY::ARRAY(PGLOBAL g, PQUERY qryp) : CSORT(FALSE) + { + Type = qryp->GetColType(0); + Nval = qryp->GetNblin(); + Ndif = 0; + Bot = 0; + Top = 0; + Size = Nval; + Xsize = -1; + Len = qryp->GetColLength(0); + X = Inf = Sup = 0; + Correlated = FALSE; + + switch (Type) { + case TYPE_STRING: + case TYPE_SHORT: + case TYPE_INT: + case TYPE_DATE: + case TYPE_DOUBLE: +// case TYPE_TOKEN: +// case TYPE_LIST: +// Valblk = qryp->GetCol(0)->Result; +// Vblp = qryp->GetColBlk(0); +// Value = qryp->GetColValue(0); +// break; + default: // This is illegal an causes an ill formed array building + sprintf(g->Message, MSG(BAD_ARRAY_TYPE), Type); + Type = TYPE_ERROR; + } // endswitch type + + if (!Valblk || (!Valblk->GetMemp() && Type != TYPE_LIST)) + // The error message was built by ??? + Type = TYPE_ERROR; + + Constant = TRUE; + } // end of ARRAY constructor + +/***********************************************************************/ +/* ARRAY constructor from a TYPE_LIST subarray. */ +/***********************************************************************/ +ARRAY::ARRAY(PGLOBAL g, PARRAY par, int k) : CSORT(FALSE) + { + int prec; + LSTBLK *lp; + + if (par->Type != TYPE_LIST) { + Type = TYPE_ERROR; + return; + } // endif Type + + lp = (LSTBLK*)par->Vblp; + + Nval = par->Nval; + Ndif = 0; + Bot = 0; + Top = 0; + Size = par->Size; + Xsize = -1; + + Valblk = lp->Mbvk[k]; + Vblp = Valblk->Vblk; + Type = Vblp->GetType(); + Len = (Type == TYPE_STRING) ? Vblp->GetVlen() : 0; + prec = (Type == TYPE_FLOAT) ? 2 : 0; + Value = AllocateValue(g, Type, Len, prec, NULL); + Constant = TRUE; + } // end of ARRAY constructor + +/***********************************************************************/ +/* Empty: reset the array for a new use (correlated queries). */ +/* Note: this is temporary as correlated queries will not use arrays */ +/* anymore with future optimized algorithms. 
*/ +/***********************************************************************/ +void ARRAY::Empty(void) + { + assert(Correlated); + Nval = Ndif = 0; + Bot = Top = X = Inf = Sup = 0; + } // end of Empty +#endif // 0 + +/***********************************************************************/ +/* Add a string element to an array. */ +/***********************************************************************/ +bool ARRAY::AddValue(PGLOBAL g, PSZ strp) + { + if (Type != TYPE_STRING) { + sprintf(g->Message, MSG(ADD_BAD_TYPE), GetTypeName(Type), "CHAR"); + return TRUE; + } // endif Type + + if (trace) + htrc(" adding string(%d): '%s'\n", Nval, strp); + +//Value->SetValue_psz(strp); +//Vblp->SetValue(valp, Nval++); + Vblp->SetValue(strp, Nval++); + return FALSE; + } // end of AddValue + +/***********************************************************************/ +/* Add a char pointer element to an array. */ +/***********************************************************************/ +bool ARRAY::AddValue(PGLOBAL g, void *p) + { + if (Type != TYPE_PCHAR) { + sprintf(g->Message, MSG(ADD_BAD_TYPE), GetTypeName(Type), "PCHAR"); + return TRUE; + } // endif Type + + if (trace) + htrc(" adding pointer(%d): %p\n", Nval, p); + + Vblp->SetValue((PSZ)p, Nval++); + return FALSE; + } // end of AddValue + +/***********************************************************************/ +/* Add a short integer element to an array. */ +/***********************************************************************/ +bool ARRAY::AddValue(PGLOBAL g, short n) + { + if (Type != TYPE_SHORT) { + sprintf(g->Message, MSG(ADD_BAD_TYPE), GetTypeName(Type), "SHORT"); + return TRUE; + } // endif Type + + if (trace) + htrc(" adding SHORT(%d): %hd\n", Nval, n); + +//Value->SetValue(n); +//Vblp->SetValue(valp, Nval++); + Vblp->SetValue(n, Nval++); + return FALSE; + } // end of AddValue + +/***********************************************************************/ +/* Add an integer element to an array. 
*/ +/***********************************************************************/ +bool ARRAY::AddValue(PGLOBAL g, int n) + { + if (Type != TYPE_INT) { + sprintf(g->Message, MSG(ADD_BAD_TYPE), GetTypeName(Type), "INTEGER"); + return TRUE; + } // endif Type + + if (trace) + htrc(" adding int(%d): %d\n", Nval, n); + +//Value->SetValue(n); +//Vblp->SetValue(valp, Nval++); + Vblp->SetValue(n, Nval++); + return FALSE; + } // end of AddValue + +/***********************************************************************/ +/* Add a double float element to an array. */ +/***********************************************************************/ +bool ARRAY::AddValue(PGLOBAL g, double d) + { + if (Type != TYPE_DOUBLE) { + sprintf(g->Message, MSG(ADD_BAD_TYPE), GetTypeName(Type), "DOUBLE"); + return TRUE; + } // endif Type + + if (trace) + htrc(" adding float(%d): %lf\n", Nval, d); + + Value->SetValue(d); + Vblp->SetValue(Value, Nval++); + return FALSE; + } // end of AddValue + +/***********************************************************************/ +/* Add the value of a XOBJECT block to an array. */ +/***********************************************************************/ +bool ARRAY::AddValue(PGLOBAL g, PXOB xp) + { + if (Type != xp->GetResultType()) { + sprintf(g->Message, MSG(ADD_BAD_TYPE), + GetTypeName(xp->GetResultType()), GetTypeName(Type)); + return TRUE; + } // endif Type + + if (trace) + htrc(" adding (%d) from xp=%p\n", Nval, xp); + +//AddValue(xp->GetValue()); + Vblp->SetValue(xp->GetValue(), Nval++); + return FALSE; + } // end of AddValue + +/***********************************************************************/ +/* Add a value to an array. 
*/ +/***********************************************************************/ +bool ARRAY::AddValue(PGLOBAL g, PVAL vp) + { + if (Type != vp->GetType()) { + sprintf(g->Message, MSG(ADD_BAD_TYPE), + GetTypeName(vp->GetType()), GetTypeName(Type)); + return TRUE; + } // endif Type + + if (trace) + htrc(" adding (%d) from vp=%p\n", Nval, vp); + + Vblp->SetValue(vp, Nval++); + return FALSE; + } // end of AddValue + +/***********************************************************************/ +/* Retrieve the nth value of the array. */ +/***********************************************************************/ +void ARRAY::GetNthValue(PVAL valp, int n) + { + valp->SetValue_pvblk(Vblp, n); + } // end of GetNthValue + +#if 0 +/***********************************************************************/ +/* Retrieve the nth subvalue of a list array. */ +/***********************************************************************/ +bool ARRAY::GetSubValue(PGLOBAL g, PVAL valp, int *kp) + { + PVBLK vblp; + + if (Type != TYPE_LIST) { + sprintf(g->Message, MSG(NO_SUB_VAL), Type); + return TRUE; + } // endif Type + + vblp = ((LSTBLK*)Vblp)->Mbvk[kp[0]]->Vblk; + valp->SetValue_pvblk(vblp, kp[1]); + return FALSE; + } // end of GetSubValue +#endif // 0 + +/***********************************************************************/ +/* Return the nth value of an integer array. */ +/***********************************************************************/ +int ARRAY::GetIntValue(int n) + { + assert (Type == TYPE_INT); + return Vblp->GetIntValue(n); + } // end of GetIntValue + +/***********************************************************************/ +/* Return the nth value of a STRING array. 
*/ +/***********************************************************************/ +char *ARRAY::GetStringValue(int n) + { + assert (Type == TYPE_STRING || Type == TYPE_PCHAR); + return Vblp->GetCharValue(n); + } // end of GetStringValue + +/***********************************************************************/ +/* Find whether a value is in an array. */ +/* Provide a conversion limited to the Value limitation. */ +/***********************************************************************/ +bool ARRAY::Find(PVAL valp) + { + register int n; + PVAL vp; + + if (Type != valp->GetType()) { + Value->SetValue_pval(valp); + vp = Value; + } else + vp = valp; + + Inf = Bot, Sup = Top; + + while (Sup - Inf > 1) { + X = (Inf + Sup) >> 1; + n = Vblp->CompVal(vp, X); + + if (n < 0) + Sup = X; + else if (n > 0) + Inf = X; + else + return TRUE; + + } // endwhile + + return FALSE; + } // end of Find + +/***********************************************************************/ +/* ARRAY: Compare routine for a list of values. */ +/***********************************************************************/ +BYTE ARRAY::Vcompare(PVAL vp, int n) + { + Value->SetValue_pvblk(Vblp, n); + return vp->TestValue(Value); + } // end of Vcompare + +/***********************************************************************/ +/* Test a filter condition on an array depending on operator and mod. */ +/* Modificator values are 1: ANY (or SOME) and 2: ALL. */ +/***********************************************************************/ +bool ARRAY::FilTest(PGLOBAL g, PVAL valp, OPVAL opc, int opm) + { + int i; + PVAL vp; + BYTE bt = OpBmp(g, opc); + int top = Nval - 1; + + if (top < 0) // Array is empty + // Return TRUE for ALL because it means that there are no item that + // does not verify the condition, which is true indeed. + // Return FALSE for ANY because TRUE means that there is at least + // one item that verifies the condition, which is false. 
+ return opm == 2; + + if (valp) { + if (Type != valp->GetType()) { + Value->SetValue_pval(valp); + vp = Value; + } else + vp = valp; + + } else if (opc != OP_EXIST) { + sprintf(g->Message, MSG(MISSING_ARG), opc); + longjmp(g->jumper[g->jump_level], TYPE_ARRAY); + } else // OP_EXIST + return Nval > 0; + + if (opc == OP_IN || (opc == OP_EQ && opm == 1)) + return Find(vp); + else if (opc == OP_NE && opm == 2) + return !Find(vp); + else if (opc == OP_EQ && opm == 2) + return (Ndif == 1) ? !(Vcompare(vp, 0) & bt) : FALSE; + else if (opc == OP_NE && opm == 1) + return (Ndif == 1) ? !(Vcompare(vp, 0) & bt) : TRUE; + + if (Type != TYPE_LIST) { + if (opc == OP_GT || opc == OP_GE) + return !(Vcompare(vp, (opm == 1) ? 0 : top) & bt); + else + return !(Vcompare(vp, (opm == 2) ? 0 : top) & bt); + + } // endif Type + + // Case of TYPE_LIST + if (opm == 2) { + for (i = 0; i < Nval; i++) + if (Vcompare(vp, i) & bt) + return FALSE; + + return TRUE; + } else { // opm == 1 + for (i = 0; i < Nval; i++) + if (!(Vcompare(vp, i) & bt)) + return TRUE; + + return FALSE; + } // endif opm + + } // end of FilTest + +/***********************************************************************/ +/* Test whether this array can be converted to TYPE_SHORT. */ +/* Must be called after the array is sorted. */ +/***********************************************************************/ +bool ARRAY::CanBeShort(void) + { + int* To_Val = (int*)Valblk->GetMemp(); + + if (Type != TYPE_INT || !Ndif) + return FALSE; + + // Because the array is sorted, this is true if all the array + // int values are in the range of SHORT values + return (To_Val[0] >= -32768 && To_Val[Nval-1] < 32768); + } // end of CanBeShort + +/***********************************************************************/ +/* Convert an array to new numeric type k. */ +/* Note: conversion is always made in ascending order from STRING to */ +/* short to int to double so no precision is lost in the conversion. 
*/ +/* One exception is converting from int to short compatible arrays. */ +/***********************************************************************/ +int ARRAY::Convert(PGLOBAL g, int k, PVAL vp) + { + int i, prec = 0; + bool b = FALSE; + PMBV ovblk = Valblk; + PVBLK ovblp = Vblp; + + Type = k; // k is the new type + Valblk = new(g) MBVALS; + + switch (Type) { + case TYPE_DOUBLE: + prec = 2; + case TYPE_SHORT: + case TYPE_INT: + case TYPE_DATE: + Len = 1; + break; + default: + sprintf(g->Message, MSG(BAD_CONV_TYPE), Type); + return TYPE_ERROR; + } // endswitch k + + Size = Nval; + Nval = 0; + Vblp = Valblk->Allocate(g, Type, Len, 0, Size); + + if (!Valblk->GetMemp()) + // The error message was built by PlgDBalloc + return TYPE_ERROR; + else + Value = AllocateValue(g, Type, Len, 0, NULL); + + /*********************************************************************/ + /* Converting STRING to DATE can be done according to date format. */ + /*********************************************************************/ + if (Type == TYPE_DATE && ovblp->GetType() == TYPE_STRING && vp) + if (((DTVAL*)Value)->SetFormat(g, vp)) + return TYPE_ERROR; + else + b = TRUE; // Sort the new array on date internal values + + /*********************************************************************/ + /* Do the actual conversion. */ + /*********************************************************************/ + for (i = 0; i < Size; i++) { + Value->SetValue_pvblk(ovblp, i); + + if (AddValue(g, Value)) + return TYPE_ERROR; + + } // endfor i + + /*********************************************************************/ + /* For sorted arrays, get the initial find values. */ + /*********************************************************************/ + if (b) + Sort(g); + + ovblk->Free(); + return Type; + } // end of Convert + +/***********************************************************************/ +/* ARRAY Save: save value at i (used while rordering). 
*/ +/***********************************************************************/ +void ARRAY::Save(int i) + { + Value->SetValue_pvblk(Vblp, i); + } // end of Save + +/***********************************************************************/ +/* ARRAY Restore: restore value to j (used while rordering). */ +/***********************************************************************/ +void ARRAY::Restore(int j) + { + Vblp->SetValue(Value, j); + } // end of Restore + +/***********************************************************************/ +/* ARRAY Move: move value from k to j (used while rordering). */ +/***********************************************************************/ +void ARRAY::Move(int j, int k) + { + Vblp->Move(k, j); // VALBLK does the opposite !!! + } // end of Move + +/***********************************************************************/ +/* ARRAY: Compare routine for one LIST value (ascending only). */ +/***********************************************************************/ +int ARRAY::Qcompare(int *i1, int *i2) + { + return Vblp->CompVal(*i1, *i2); + } // end of Qcompare + +/***********************************************************************/ +/* Mainly meant to set the character arrays case sensitiveness. 
*/ +/***********************************************************************/ +void ARRAY::SetPrecision(PGLOBAL g, int p) + { + if (Vblp == NULL) { + strcpy(g->Message, MSG(PREC_VBLP_NULL)); + longjmp(g->jumper[g->jump_level], TYPE_ARRAY); + } // endif Vblp + + bool was = Vblp->IsCi(); + + if (was && !p) { + strcpy(g->Message, MSG(BAD_SET_CASE)); + longjmp(g->jumper[g->jump_level], TYPE_ARRAY); + } // endif Vblp + + if (was || !p) + return; + else + Vblp->SetPrec(p); + + if (!was && Type == TYPE_STRING) + // Must be resorted to eliminate duplicate strings + if (Sort(g)) + longjmp(g->jumper[g->jump_level], TYPE_ARRAY); + + } // end of SetPrecision + +/***********************************************************************/ +/* Sort and eliminate distinct values from an array. */ +/* Note: this is done by making a sorted index on distinct values. */ +/* Returns FALSE if Ok or TRUE in case of error. */ +/***********************************************************************/ +bool ARRAY::Sort(PGLOBAL g) + { + int i, j, k; + + // This is to avoid multiply allocating for correlated subqueries + if (Nval > Xsize) { + if (Xsize >= 0) { + // Was already allocated + PlgDBfree(Index); + PlgDBfree(Offset); + } // endif Xsize + + // Prepare non conservative sort with offet values + Index.Size = Nval * sizeof(int); + + if (!PlgDBalloc(g, NULL, Index)) + goto error; + + Offset.Size = (Nval + 1) * sizeof(int); + + if (!PlgDBalloc(g, NULL, Offset)) + goto error; + + Xsize = Nval; + } // endif Nval + + // Call the sort program, it returns the number of distinct values + Ndif = Qsort(g, Nval); + + if (Ndif < 0) + goto error; + + // Use the sort index to reorder the data in storage so it will + // be physically sorted and Index can be removed. 
+ for (i = 0; i < Nval; i++) { + if (Pex[i] == i || Pex[i] == Nval) + // Already placed or already moved + continue; + + Save(i); + + for (j = i;; j = k) { + k = Pex[j]; + Pex[j] = Nval; // Mark position as set + + if (k == i) { + Restore(j); + break; // end of loop + } else + Move(j, k); + + } // endfor j + + } // endfor i + + // Reduce the size of the To_Val array if Ndif < Nval + if (Ndif < Nval) { + for (i = 1; i < Ndif; i++) + if (i != Pof[i]) + break; + + for (; i < Ndif; i++) + Move(i, Pof[i]); + + Nval = Ndif; + } // endif ndif + +//if (!Correlated) { + if (Size > Nval) { + Size = Nval; + Valblk->ReAllocate(g, Size); + } // endif Size + + // Index and Offset are not used anymore + PlgDBfree(Index); + PlgDBfree(Offset); + Xsize = -1; +// } // endif Correlated + + Bot = -1; // For non optimized search + Top = Ndif; // Find searches the whole array. + return FALSE; + + error: + Nval = Ndif = 0; + Valblk->Free(); + PlgDBfree(Index); + PlgDBfree(Offset); + return TRUE; + } // end of Sort + +/***********************************************************************/ +/* Sort and return the sort index. */ +/* Note: This is meant if the array contains unique values. */ +/* Returns Index.Memp if Ok or NULL in case of error. 
*/ +/***********************************************************************/ +void *ARRAY::GetSortIndex(PGLOBAL g) + { + // Prepare non conservative sort with offet values + Index.Size = Nval * sizeof(int); + + if (!PlgDBalloc(g, NULL, Index)) + goto error; + + Offset.Size = (Nval + 1) * sizeof(int); + + if (!PlgDBalloc(g, NULL, Offset)) + goto error; + + // Call the sort program, it returns the number of distinct values + Ndif = Qsort(g, Nval); + + if (Ndif < 0) + goto error; + + if (Ndif < Nval) + goto error; + + PlgDBfree(Offset); + return Index.Memp; + + error: + Nval = Ndif = 0; + Valblk->Free(); + PlgDBfree(Index); + PlgDBfree(Offset); + return NULL; + } // end of GetSortIndex + +/***********************************************************************/ +/* Block filter testing for IN operator on Column/Array operands. */ +/* Here we call Find that returns TRUE if the value is in the array */ +/* with X equal to the index of the found value in the array, or */ +/* FALSE if the value is not in the array with Inf and Sup being the */ +/* indexes of the array values that are immediately below and over */ +/* the not found value. This enables to restrict the array to the */ +/* values that are between the min and max block values and to return */ +/* the indication of whether the Find will be always true, always not */ +/* true or other. */ +/***********************************************************************/ +int ARRAY::BlockTest(PGLOBAL g, int opc, int opm, + void *minp, void *maxp, bool s) + { + bool bin, bax, pin, pax, veq, all = (opm == 2); + + if (Ndif == 0) // Array is empty + // Return TRUE for ALL because it means that there are no item that + // does not verify the condition, which is true indeed. + // Return FALSE for ANY because TRUE means that there is at least + // one item that verifies the condition, which is false. + return (all) ? 
2 : -2; + else if (opc == OP_EQ && all && Ndif > 1) + return -2; + else if (opc == OP_NE && !all && Ndif > 1) + return 2; +// else if (Ndif == 1) +// all = FALSE; + + // veq is true when all values in the block are equal + switch (Type) { + case TYPE_STRING: veq = (Vblp->IsCi()) + ? !stricmp((char*)minp, (char*)maxp) + : !strcmp((char*)minp, (char*)maxp); break; + case TYPE_SHORT: veq = *(short*)minp == *(short*)maxp; break; + case TYPE_INT: veq = *(int*)minp == *(int*)maxp; break; + case TYPE_DOUBLE: veq = *(double*)minp == *(double*)maxp; break; + default: veq = FALSE; // Error ? + } // endswitch type + + if (!s) + Bot = -1; + + Top = Ndif; // Reset Top at top of list + Value->SetBinValue(maxp); + Top = (bax = Find(Value)) ? X + 1 : Sup; + + if (bax) { + if (opc == OP_EQ) + return (veq) ? 1 : 0; + else if (opc == OP_NE) + return (veq) ? -1 : 0; + + if (X == 0) switch (opc) { + // Max value is equal to min list value + case OP_LE: return 1; break; + case OP_LT: return (veq) ? -1 : 0; break; + case OP_GE: return (veq) ? 1 : 0; break; + case OP_GT: return -1; break; + } // endswitch opc + + pax = (opc == OP_GE) ? (X < Ndif - 1) : TRUE; + } else if (Inf == Bot) { + // Max value is smaller than min list value + return (opc == OP_LT || opc == OP_LE || opc == OP_NE) ? 1 : -1; + } else + pax = (Sup < Ndif); // True if max value is inside the list value + + if (!veq) { + Value->SetBinValue(minp); + bin = Find(Value); + } else + bin = bax; + + Bot = (bin) ? X - 1 : Inf; + + if (bin) { + if (opc == OP_EQ || opc == OP_NE) + return 0; + + if (X == Ndif - 1) switch (opc) { + case OP_GE: return (s) ? 2 : 1; break; + case OP_GT: return (veq) ? -1 : 0; break; + case OP_LE: return (veq) ? 1 : 0; break; + case OP_LT: return (s) ? -2 : -1; break; + } // endswitch opc + + pin = (opc == OP_LE) ? (X > 0) : TRUE; + } else if (Sup == Ndif) { + // Min value is greater than max list value + if (opc == OP_GT || opc == OP_GE || opc == OP_NE) + return (s) ? 2 : 1; + else + return (s) ? 
-2 : -1; + + } else + pin = (Inf >= 0); // True if min value is inside the list value + + if (Top - Bot <= 1) { + // No list item between min and max value +#if defined(_DEBUG) + assert (!bin && !bax); +#endif + switch (opc) { + case OP_EQ: return -1; break; + case OP_NE: return 1; break; + default: return (all) ? -1 : 1; break; + } // endswitch opc + + } // endif + +#if defined(_DEBUG) + assert (Ndif > 1); // if Ndif = 1 we should have returned already +#endif + + // At this point, if there are no logical errors in the algorithm, + // the only possible overlaps between the array and the block are: + // Array: +-------+ +-------+ +-------+ +-----+ + // Block: +-----+ +---+ +------+ +--------+ + // TRUE: pax pin pax pin + if (all) switch (opc) { + case OP_GT: + case OP_GE: return (pax) ? -1 : 0; break; + case OP_LT: + case OP_LE: return (pin) ? -1 : 0; break; + } // endswitch opc + + return 0; + } // end of BlockTest + +/***********************************************************************/ +/* MakeArrayList: Makes a value list from an SQL IN array (in work). */ +/***********************************************************************/ +PSZ ARRAY::MakeArrayList(PGLOBAL g) + { + char *p, *tp; + int i; + size_t z, len = 2; + + if (Type == TYPE_LIST) + return "(???)"; // To be implemented + + z = MY_MAX(24, GetTypeSize(Type, Len) + 4); + tp = (char*)PlugSubAlloc(g, NULL, z); + + for (i = 0; i < Nval; i++) { + Value->SetValue_pvblk(Vblp, i); + Value->Print(g, tp, z); + len += strlen(tp); + } // enfor i + + if (trace) + htrc("Arraylist: len=%d\n", len); + + p = (char *)PlugSubAlloc(g, NULL, len); + strcpy(p, "("); + + for (i = 0; i < Nval;) { + Value->SetValue_pvblk(Vblp, i); + Value->Print(g, tp, z); + strcat(p, tp); + strcat(p, (++i == Nval) ? 
")" : ","); + } // enfor i + + if (trace) + htrc("Arraylist: newlen=%d\n", strlen(p)); + + return p; + } // end of MakeArrayList + +/***********************************************************************/ +/* Make file output of ARRAY contents. */ +/***********************************************************************/ +void ARRAY::Print(PGLOBAL g, FILE *f, uint n) + { + char m[64]; + int lim = MY_MIN(Nval,10); + + memset(m, ' ', n); // Make margin string + m[n] = '\0'; + fprintf(f, "%sARRAY: type=%d\n", m, Type); + memset(m, ' ', n + 2); // Make margin string + m[n] = '\0'; + + if (Type != TYPE_LIST) { + fprintf(f, "%sblock=%p numval=%d\n", m, Valblk->GetMemp(), Nval); + + if (Vblp) + for (int i = 0; i < lim; i++) { + Value->SetValue_pvblk(Vblp, i); + Value->Print(g, f, n+4); + } // endfor i + + } else + fprintf(f, "%sVALLST: numval=%d\n", m, Nval); + + } // end of Print + +/***********************************************************************/ +/* Make string output of ARRAY contents. */ +/***********************************************************************/ +void ARRAY::Print(PGLOBAL g, char *ps, uint z) + { + if (z < 16) + return; + + sprintf(ps, "ARRAY: type=%d\n", Type); + // More to be implemented later + } // end of Print + +/* -------------------------- Class MULAR ---------------------------- */ + +/***********************************************************************/ +/* MULAR public constructor. */ +/***********************************************************************/ +MULAR::MULAR(PGLOBAL g, int n) : CSORT(FALSE) + { + Narray = n; + Pars = (PARRAY*)PlugSubAlloc(g, NULL, n * sizeof(PARRAY)); + } // end of MULAR constructor + +/***********************************************************************/ +/* MULAR: Compare routine multiple arrays. 
*/ +/***********************************************************************/ +int MULAR::Qcompare(int *i1, int *i2) + { + register int i, n = 0; + + for (i = 0; i < Narray; i++) + if ((n = Pars[i]->Qcompare(i1, i2))) + break; + + return n; + } // end of Qcompare + +/***********************************************************************/ +/* Sort and eliminate distinct values from multiple arrays. */ +/* Note: this is done by making a sorted index on distinct values. */ +/* Returns FALSE if Ok or TRUE in case of error. */ +/***********************************************************************/ +bool MULAR::Sort(PGLOBAL g) + { + int i, j, k, n, nval, ndif; + + // All arrays must have the same number of values + nval = Pars[0]->Nval; + + for (n = 1; n < Narray; n++) + if (Pars[n]->Nval != nval) { + strcpy(g->Message, MSG(BAD_ARRAY_VAL)); + return TRUE; + } // endif nval + + // Prepare non conservative sort with offet values + Index.Size = nval * sizeof(int); + + if (!PlgDBalloc(g, NULL, Index)) + goto error; + + Offset.Size = (nval + 1) * sizeof(int); + + if (!PlgDBalloc(g, NULL, Offset)) + goto error; + + // Call the sort program, it returns the number of distinct values + ndif = Qsort(g, nval); + + if (ndif < 0) + goto error; + + // Use the sort index to reorder the data in storage so it will + // be physically sorted and Index can be removed. 
+ for (i = 0; i < nval; i++) { + if (Pex[i] == i || Pex[i] == nval) + // Already placed or already moved + continue; + + for (n = 0; n < Narray; n++) + Pars[n]->Save(i); + + for (j = i;; j = k) { + k = Pex[j]; + Pex[j] = nval; // Mark position as set + + if (k == i) { + for (n = 0; n < Narray; n++) + Pars[n]->Restore(j); + + break; // end of loop + } else + for (n = 0; n < Narray; n++) + Pars[n]->Move(j, k); + + } // endfor j + + } // endfor i + + // Reduce the size of the To_Val array if ndif < nval + if (ndif < nval) { + for (i = 1; i < ndif; i++) + if (i != Pof[i]) + break; + + for (; i < ndif; i++) + for (n = 0; n < Narray; n++) + Pars[n]->Move(i, Pof[i]); + + for (n = 0; n < Narray; n++) { + Pars[n]->Nval = ndif; + Pars[n]->Size = ndif; + Pars[n]->Valblk->ReAllocate(g, ndif); + } // endfor n + + } // endif ndif + + // Index and Offset are not used anymore + PlgDBfree(Index); + PlgDBfree(Offset); + + for (n = 0; n < Narray; n++) { + Pars[n]->Bot = -1; // For non optimized search + Pars[n]->Top = ndif; // Find searches the whole array. + } // endfor n + + return FALSE; + + error: + PlgDBfree(Index); + PlgDBfree(Offset); + return TRUE; + } // end of Sort diff --git a/storage/connect/array.h b/storage/connect/array.h new file mode 100644 index 00000000000..4a818414e9c --- /dev/null +++ b/storage/connect/array.h @@ -0,0 +1,130 @@ +/**************** Array H Declares Source Code File (.H) ***************/ +/* Name: ARRAY.H Version 3.1 */ +/* */ +/* (C) Copyright to the author Olivier BERTRAND 2005-2014 */ +/* */ +/* This file contains the ARRAY and VALBASE derived classes declares. 
*/ +/***********************************************************************/ +#ifndef __ARRAY_H +#define __ARRAY_H + + +/***********************************************************************/ +/* Include required application header files */ +/***********************************************************************/ +#include "xobject.h" +#include "valblk.h" +#include "csort.h" + +typedef class ARRAY *PARRAY; + +/***********************************************************************/ +/* Definition of class ARRAY with all its method functions. */ +/* Note: This is not a general array class that could be defined as */ +/* a template class, but rather a specific object containing a list */ +/* of values to be processed by the filter IN operator. */ +/* In addition it must act as a metaclass by being able to give back */ +/* the type of values it contains. */ +/* It must also be able to convert itself from some type to another. */ +/***********************************************************************/ +class DllExport ARRAY : public XOBJECT, public CSORT { // Array descblock + friend class MULAR; +//friend class VALLST; +//friend class SFROW; + public: + // Constructors + ARRAY(PGLOBAL g, int type, int size, int len = 1, int prec = 0); +//ARRAY(PGLOBAL g, PQUERY qryp); +//ARRAY(PGLOBAL g, PARRAY par, int k); + + // Implementation + virtual int GetType(void) {return TYPE_ARRAY;} + virtual int GetResultType(void) {return Type;} + virtual int GetLength(void) {return Len;} + virtual int GetLengthEx(void) {return Len;} + virtual int GetScale() {return 0;} + int GetNval(void) {return Nval;} + int GetSize(void) {return Size;} +// PVAL GetValp(void) {return Valp;} + void SetType(int atype) {Type = atype;} +// void SetCorrel(bool b) {Correlated = b;} + + // Methods + virtual void Reset(void) {Bot = -1;} + virtual int Qcompare(int *, int *); + virtual bool Compare(PXOB) {assert(FALSE); return FALSE;} + virtual bool SetFormat(PGLOBAL, FORMAT&) {assert(FALSE); return FALSE;} 
+//virtual int CheckSpcCol(PTDB, int) {return 0;} + virtual void Print(PGLOBAL g, FILE *f, uint n); + virtual void Print(PGLOBAL g, char *ps, uint z); +// void Empty(void); + void SetPrecision(PGLOBAL g, int p); + bool AddValue(PGLOBAL g, PSZ sp); + bool AddValue(PGLOBAL g, void *p); + bool AddValue(PGLOBAL g, short n); + bool AddValue(PGLOBAL g, int n); + bool AddValue(PGLOBAL g, double f); + bool AddValue(PGLOBAL g, PXOB xp); + bool AddValue(PGLOBAL g, PVAL vp); + void GetNthValue(PVAL valp, int n); + int GetIntValue(int n); + char *GetStringValue(int n); + BYTE Vcompare(PVAL vp, int n); + void Save(int); + void Restore(int); + void Move(int, int); + bool Sort(PGLOBAL g); + void *GetSortIndex(PGLOBAL g); + bool Find(PVAL valp); + bool FilTest(PGLOBAL g, PVAL valp, OPVAL opc, int opm); + int Convert(PGLOBAL g, int k, PVAL vp = NULL); + int BlockTest(PGLOBAL g, int opc, int opm, + void *minp, void *maxp, bool s); + PSZ MakeArrayList(PGLOBAL g); + bool CanBeShort(void); + bool GetSubValue(PGLOBAL g, PVAL valp, int *kp); + + protected: + // Members + PMBV Valblk; // To the MBVALS class + PVBLK Vblp; // To Valblock of the data array +//PVAL Valp; // The value used for Save and Restore is Value + int Size; // Size of value array + int Nval; // Total number of items in array + int Ndif; // Total number of distinct items in array + int Xsize; // Size of Index (used for correlated arrays) + int Type; // Type of individual values in the array + int Len; // Length of character string + int Bot; // Bottom of research index + int Top; // Top of research index + int X, Inf, Sup; // Used for block optimization +//bool Correlated; // -----------> Temporary + }; // end of class ARRAY + +/***********************************************************************/ +/* Definition of class MULAR with all its method functions. */ +/* This class is used when constructing the arrays of constants used */ +/* for indexing. 
Its only purpose is to provide a way to sort, reduce */ +/* and reorder the arrays of multicolumn indexes as one block. Indeed */ +/* sorting the arrays independantly would break the correspondance of */ +/* column values. */ +/***********************************************************************/ +class MULAR : public CSORT, public BLOCK { // No need to be an XOBJECT + public: + // Constructor + MULAR(PGLOBAL g, int n); + + // Implementation + void SetPars(PARRAY par, int i) {Pars[i] = par;} + + // Methods + virtual int Qcompare(int *i1, int *i2); // Sort compare routine + bool Sort(PGLOBAL g); + + protected: + // Members + int Narray; // The number of sub-arrays + PARRAY *Pars; // To the block of real arrays + }; // end of class ARRAY + +#endif // __ARRAY_H diff --git a/storage/connect/blkfil.cpp b/storage/connect/blkfil.cpp new file mode 100644 index 00000000000..c1099261cef --- /dev/null +++ b/storage/connect/blkfil.cpp @@ -0,0 +1,1080 @@ +/************* BlkFil C++ Program Source Code File (.CPP) **************/ +/* PROGRAM NAME: BLKFIL */ +/* ------------- */ +/* Version 2.5 */ +/* */ +/* COPYRIGHT: */ +/* ---------- */ +/* (C) Copyright to the author Olivier BERTRAND 2004-2014 */ +/* */ +/* WHAT THIS PROGRAM DOES: */ +/* ----------------------- */ +/* This program is the implementation of block indexing classes. */ +/* */ +/***********************************************************************/ + +/***********************************************************************/ +/* Include relevant MariaDB header file. 
*/ +/***********************************************************************/ +#include "my_global.h" +#include "sql_class.h" +//#include "sql_time.h" + +#if defined(WIN32) +//#include <windows.h> +#else // !WIN32 +#include <string.h> +#include <sys/types.h> +#include <sys/stat.h> +#endif // !WIN32 + +/***********************************************************************/ +/* Include application header files: */ +/***********************************************************************/ +#include "global.h" // global declarations +#include "plgdbsem.h" // DB application declarations +#include "xindex.h" // Key Index class declarations +#include "filamtxt.h" // File access method dcls +#include "tabdos.h" // TDBDOS and DOSCOL class dcls +#include "array.h" // ARRAY classes dcls +#include "blkfil.h" // Block Filter classes dcls + +/***********************************************************************/ +/* Static variables. */ +/***********************************************************************/ +extern "C" int trace; + +/* ------------------------ Class BLOCKFILTER ------------------------ */ + +/***********************************************************************/ +/* BLOCKFILTER constructor. */ +/***********************************************************************/ +BLOCKFILTER::BLOCKFILTER(PTDBDOS tdbp, int op) + { + Tdbp = tdbp; + Correl = FALSE; + Opc = op; + Opm = 0; + Result = 0; + } // end of BLOCKFILTER constructor + +/***********************************************************************/ +/* Make file output of BLOCKFILTER contents. 
*/ +/***********************************************************************/ +void BLOCKFILTER::Print(PGLOBAL g, FILE *f, uint n) + { + char m[64]; + + memset(m, ' ', n); // Make margin string + m[n] = '\0'; + + fprintf(f, "%sBLOCKFILTER: at %p opc=%d opm=%d result=%d\n", + m, this, Opc, Opm, Result); + } // end of Print + +/***********************************************************************/ +/* Make string output of BLOCKFILTER contents. */ +/***********************************************************************/ +void BLOCKFILTER::Print(PGLOBAL g, char *ps, uint z) + { + strncat(ps, "BlockFilter(s)", z); + } // end of Print + + +/* ---------------------- Class BLKFILLOG ---------------------------- */ + +/***********************************************************************/ +/* BLKFILLOG constructor. */ +/***********************************************************************/ +BLKFILLOG::BLKFILLOG(PTDBDOS tdbp, int op, PBF *bfp, int n) + : BLOCKFILTER(tdbp, op) + { + N = n; + Fil = bfp; + + for (int i = 0; i < N; i++) + if (Fil[i]) + Correl |= Fil[i]->Correl; + + } // end of BLKFILLOG constructor + +/***********************************************************************/ +/* Reset: this function is used only to check the existence of a */ +/* BLKFILIN block and have it reset its Bot value for sorted columns. */ +/***********************************************************************/ +void BLKFILLOG::Reset(PGLOBAL g) + { + for (int i = 0; i < N; i++) + if (Fil[i]) + Fil[i]->Reset(g); + + } // end of Reset + +/***********************************************************************/ +/* This function is used for block filter evaluation. We use here a */ +/* fuzzy logic between the values returned by evaluation blocks: */ +/* -2: the condition will be always false for the rest of the file. */ +/* -1: the condition will be false for the whole group. */ +/* 0: the condition may be true for some of the group values. 
*/ +/* 1: the condition will be true for the whole group. */ +/* 2: the condition will be always true for the rest of the file. */ +/***********************************************************************/ +int BLKFILLOG::BlockEval(PGLOBAL g) + { + int i, rc; + + for (i = 0; i < N; i++) { + // 0: Means some block filter value may be True + rc = (Fil[i]) ? Fil[i]->BlockEval(g) : 0; + + if (!i) + Result = (Opc == OP_NOT) ? -rc : rc; + else switch (Opc) { + case OP_AND: + Result = MY_MIN(Result, rc); + break; + case OP_OR: + Result = MY_MAX(Result, rc); + break; + default: + // Should never happen + Result = 0; + return Result; + } // endswitch Opc + + } // endfor i + + return Result; + } // end of BlockEval + +/* ---------------------- Class BLKFILARI----------------------------- */ + +/***********************************************************************/ +/* BLKFILARI constructor. */ +/***********************************************************************/ +BLKFILARI::BLKFILARI(PGLOBAL g, PTDBDOS tdbp, int op, PXOB *xp) + : BLOCKFILTER(tdbp, op) + { + Colp = (PDOSCOL)xp[0]; + + if (xp[1]->GetType() == TYPE_COLBLK) { + Cpx = (PCOL)xp[1]; // Subquery pseudo constant column + Correl = TRUE; + } else + Cpx = NULL; + + Sorted = Colp->IsSorted() > 0; + + // Don't remember why this was changed. Anyway it is no good for + // correlated subqueries because the Value must reflect changes + if (Cpx) + Valp = xp[1]->GetValue(); + else + Valp = AllocateValue(g, xp[1]->GetValue()); + + } // end of BLKFILARI constructor + +/***********************************************************************/ +/* Reset: re-eval the constant value in the case of pseudo constant */ +/* column use in a correlated subquery. 
*/ +/***********************************************************************/ +void BLKFILARI::Reset(PGLOBAL g) + { + if (Cpx) { + Cpx->Reset(); + Cpx->Eval(g); + MakeValueBitmap(); // Does nothing for class BLKFILARI + } // endif Cpx + + } // end of Reset + +/***********************************************************************/ +/* Evaluate block filter for arithmetic operators. */ +/***********************************************************************/ +int BLKFILARI::BlockEval(PGLOBAL g) + { + int mincmp, maxcmp, n; + +#if defined(_DEBUG) + assert (Colp->IsClustered()); +#endif + + n = ((PTDBDOS)Colp->GetTo_Tdb())->GetCurBlk(); + mincmp = Colp->GetMin()->CompVal(Valp, n); + maxcmp = Colp->GetMax()->CompVal(Valp, n); + + switch (Opc) { + case OP_EQ: + case OP_NE: + if (mincmp < 0) // Means minval > Val + Result = (Sorted) ? -2 : -1; + else if (maxcmp > 0) // Means maxval < Val + Result = -1; + else if (!mincmp && !maxcmp) // minval = maxval = val + Result = 1; + else + Result = 0; + + break; + case OP_GT: + case OP_LE: + if (mincmp < 0) // minval > Val + Result = (Sorted) ? 2 : 1; + else if (maxcmp < 0) // maxval > Val + Result = 0; + else // maxval <= Val + Result = -1; + + break; + case OP_GE: + case OP_LT: + if (mincmp <= 0) // minval >= Val + Result = (Sorted) ? 2 : 1; + else if (maxcmp <= 0) // Maxval >= Val + Result = 0; + else // Maxval < Val + Result = -1; + + break; + } // endswitch Opc + + switch (Opc) { + case OP_NE: + case OP_LE: + case OP_LT: + Result = -Result; + break; + } // endswitch Opc + + if (trace) + htrc("BlockEval: op=%d n=%d rc=%d\n", Opc, n, Result); + + return Result; + } // end of BlockEval + +/* ---------------------- Class BLKFILAR2----------------------------- */ + +/***********************************************************************/ +/* BLKFILAR2 constructor. 
*/ +/***********************************************************************/ +BLKFILAR2::BLKFILAR2(PGLOBAL g, PTDBDOS tdbp, int op, PXOB *xp) + : BLKFILARI(g, tdbp, op, xp) + { + MakeValueBitmap(); + } // end of BLKFILAR2 constructor + +/***********************************************************************/ +/* MakeValueBitmap: Set the constant value bit map. It can be void */ +/* if the constant value is not in the column distinct values list. */ +/***********************************************************************/ +void BLKFILAR2::MakeValueBitmap(void) + { + int i; // ndv = Colp->GetNdv(); + bool found = FALSE; + PVBLK dval = Colp->GetDval(); + + assert(dval); + + /*********************************************************************/ + /* Here we cannot use Find because we must get the index */ + /* of where to put the value if it is not found in the array. */ + /* This is needed by operators other than OP_EQ or OP_NE. */ + /*********************************************************************/ + found = dval->Locate(Valp, i); + + /*********************************************************************/ + /* Set the constant value bitmap. The bitmaps are really matching */ + /* the OP_EQ, OP_LE, and OP_LT operator but are also used for the */ + /* other operators for which the Result will be inverted. */ + /* The reason the bitmaps are not directly complemented for them is */ + /* to be able to test easily the cases of sorted columns with Bxp, */ + /* and the case of a void bitmap, which happens if the constant */ + /* value is not in the column distinct values list. 
*/ + /*********************************************************************/ + if (found) { + Bmp = 1 << i; // Bit of the found value + Bxp = Bmp - 1; // All smaller values + + if (Opc != OP_LT && Opc != OP_GE) + Bxp |= Bmp; // Found value must be included + + } else { + Bmp = 0; + Bxp = (1 << i) - 1; + } // endif found + + if (!(Opc == OP_EQ || Opc == OP_NE)) + Bmp = Bxp; + + } // end of MakeValueBitmap + +/***********************************************************************/ +/* Evaluate XDB2 block filter for arithmetic operators. */ +/***********************************************************************/ +int BLKFILAR2::BlockEval(PGLOBAL g) + { +#if defined(_DEBUG) + assert (Colp->IsClustered()); +#endif + + int n = ((PTDBDOS)Colp->GetTo_Tdb())->GetCurBlk(); + uint bkmp = *(uint*)Colp->GetBmap()->GetValPtr(n); + uint bres = Bmp & bkmp; + + // Set result as if Opc were OP_EQ, OP_LT, or OP_LE + if (!bres) { + if (!Bmp) + Result = -2; // No good block in the table file + else if (!Sorted) + Result = -1; // No good values in this block + else // Sorted column, test for no more good blocks in file + Result = (Bxp & bkmp) ? -1 : -2; + + } else + // Test whether all block values are good or only some ones + Result = (bres == bkmp) ? 1 : 0; + + // For OP_NE, OP_GE, and OP_GT the result must be inverted. + switch (Opc) { + case OP_NE: + case OP_GE: + case OP_GT: + Result = -Result; + break; + } // endswitch Opc + + if (trace) + htrc("BlockEval2: op=%d n=%d rc=%d\n", Opc, n, Result); + + return Result; + } // end of BlockEval + +/* ---------------------- Class BLKFILMR2----------------------------- */ + +/***********************************************************************/ +/* BLKFILMR2 constructor. 
*/ +/***********************************************************************/ +BLKFILMR2::BLKFILMR2(PGLOBAL g, PTDBDOS tdbp, int op, PXOB *xp) + : BLKFILARI(g, tdbp, op, xp) + { + Nbm = Colp->GetNbm(); + Bmp = (uint*)PlugSubAlloc(g, NULL, Nbm * sizeof(uint)); + Bxp = (uint*)PlugSubAlloc(g, NULL, Nbm * sizeof(uint)); + MakeValueBitmap(); + } // end of BLKFILMR2 constructor + +/***********************************************************************/ +/* MakeValueBitmap: Set the constant value bit map. It can be void */ +/* if the constant value is not in the column distinct values list. */ +/***********************************************************************/ +void BLKFILMR2::MakeValueBitmap(void) + { + int i; // ndv = Colp->GetNdv(); + bool found = FALSE, noteq = !(Opc == OP_EQ || Opc == OP_NE); + PVBLK dval = Colp->GetDval(); + + assert(dval); + + for (i = 0; i < Nbm; i++) + Bmp[i] = Bxp[i] = 0; + + /*********************************************************************/ + /* Here we cannot use Find because we must get the index */ + /* of where to put the value if it is not found in the array. */ + /* This is needed by operators other than OP_EQ or OP_NE. */ + /*********************************************************************/ + found = dval->Locate(Valp, i); + + /*********************************************************************/ + /* For bitmaps larger than a ULONG, we must know where Bmp and Bxp */ + /* are positioned in the ULONG bit map block array. */ + /*********************************************************************/ + N = i / MAXBMP; + i %= MAXBMP; + + /*********************************************************************/ + /* Set the constant value bitmaps. The bitmaps are really matching */ + /* the OP_EQ, OP_LE, and OP_LT operator but are also used for the */ + /* other operators for which the Result will be inverted. 
*/ + /* The reason the bitmaps are not directly complemented for them is */ + /* to be able to easily test the cases of sorted columns with Bxp, */ + /* and the case of a void bitmap, which happens if the constant */ + /* value is not in the column distinct values list. */ + /*********************************************************************/ + if (found) { + Bmp[N] = 1 << i; + Bxp[N] = Bmp[N] - 1; + + if (Opc != OP_LT && Opc != OP_GE) + Bxp[N] |= Bmp[N]; // Found value must be included + + } else + Bxp[N] = (1 << i) - 1; + + if (noteq) + Bmp[N] = Bxp[N]; + + Void = !Bmp[N]; // There are no good values in the file + + for (i = 0; i < N; i++) { + Bxp[i] = ~0; + + if (noteq) + Bmp[i] = Bxp[i]; + + Void = Void && !Bmp[i]; + } // endfor i + + if (!Bmp[N] && !Bxp[N]) + N--; + + } // end of MakeValueBitmap + +/***********************************************************************/ +/* Evaluate XDB2 block filter for arithmetic operators. */ +/***********************************************************************/ +int BLKFILMR2::BlockEval(PGLOBAL g) + { +#if defined(_DEBUG) + assert (Colp->IsClustered()); +#endif + + int i, n = ((PTDBDOS)Colp->GetTo_Tdb())->GetCurBlk(); + bool fnd = FALSE, all = TRUE, gt = TRUE; + uint bres; + uint *bkmp = (uint*)Colp->GetBmap()->GetValPtr(n * Nbm); + + // Set result as if Opc were OP_EQ, OP_LT, or OP_LE + for (i = 0; i < Nbm; i++) + if (i <= N) { + if ((bres = Bmp[i] & bkmp[i])) + fnd = TRUE; // Some good value(s) found in the block + + if (bres != bkmp[i]) + all = FALSE; // Not all block values are good + + if (Bxp[i] & bkmp[i]) + gt = FALSE; // Not all block values are > good value(s) + + } else if (bkmp[i]) { + all = FALSE; + break; + } // endif's + + if (!fnd) { + if (Void || (gt && Sorted)) + Result = -2; // No (more) good block in file + else + Result = -1; // No good values in this block + + } else + Result = (all) ? 1 : 0; // All block values are good + + // For OP_NE, OP_GE, and OP_GT the result must be inverted. 
+ switch (Opc) { + case OP_NE: + case OP_GE: + case OP_GT: + Result = -Result; + break; + } // endswitch Opc + + if (trace) + htrc("BlockEval2: op=%d n=%d rc=%d\n", Opc, n, Result); + + return Result; + } // end of BlockEval + +/***********************************************************************/ +/* BLKSPCARI constructor. */ +/***********************************************************************/ +BLKSPCARI::BLKSPCARI(PTDBDOS tdbp, int op, PXOB *xp, int bsize) + : BLOCKFILTER(tdbp, op) + { + if (xp[1]->GetType() == TYPE_COLBLK) { + Cpx = (PCOL)xp[1]; // Subquery pseudo constant column + Correl = TRUE; + } else + Cpx = NULL; + + Valp = xp[1]->GetValue(); + Val = (int)xp[1]->GetValue()->GetIntValue(); + Bsize = bsize; + } // end of BLKFILARI constructor + +/***********************************************************************/ +/* Reset: re-eval the constant value in the case of pseudo constant */ +/* column use in a correlated subquery. */ +/***********************************************************************/ +void BLKSPCARI::Reset(PGLOBAL g) + { + if (Cpx) { + Cpx->Reset(); + Cpx->Eval(g); + Val = (int)Valp->GetIntValue(); + } // endif Cpx + + } // end of Reset + +/***********************************************************************/ +/* Evaluate block filter for arithmetic operators (ROWID) */ +/***********************************************************************/ +int BLKSPCARI::BlockEval(PGLOBAL g) + { + int mincmp, maxcmp, n, m; + + n = Tdbp->GetCurBlk(); + m = n * Bsize + 1; // Minimum Rowid value for this block + mincmp = (Val > m) ? 1 : (Val < m) ? (-1) : 0; + m = (n + 1) * Bsize; // Maximum Rowid value for this block + maxcmp = (Val > m) ? 1 : (Val < m) ? 
(-1) : 0; + + switch (Opc) { + case OP_EQ: + case OP_NE: + if (mincmp < 0) // Means minval > Val + Result = -2; // Always sorted + else if (maxcmp > 0) // Means maxval < Val + Result = -1; + else if (!mincmp && !maxcmp) // minval = maxval = val + Result = 1; + else + Result = 0; + + break; + case OP_GT: + case OP_LE: + if (mincmp < 0) // minval > Val + Result = 2; // Always sorted + else if (maxcmp < 0) // maxval > Val + Result = 0; + else // maxval <= Val + Result = -1; + + break; + case OP_GE: + case OP_LT: + if (mincmp <= 0) // minval >= Val + Result = 2; // Always sorted + else if (maxcmp <= 0) // Maxval >= Val + Result = 0; + else // Maxval < Val + Result = -1; + + break; + } // endswitch Opc + + switch (Opc) { + case OP_NE: + case OP_LE: + case OP_LT: + Result = -Result; + break; + } // endswitch Opc + + if (trace) + htrc("BlockEval: op=%d n=%d rc=%d\n", Opc, n, Result); + + return Result; + } // end of BlockEval + +/* ------------------------ Class BLKFILIN --------------------------- */ + +/***********************************************************************/ +/* BLKFILIN constructor. */ +/***********************************************************************/ +BLKFILIN::BLKFILIN(PGLOBAL g, PTDBDOS tdbp, int op, int opm, PXOB *xp) + : BLOCKFILTER(tdbp, op) + { + if (op == OP_IN) { + Opc = OP_EQ; + Opm = 1; + } else { + Opc = op; + Opm = opm; + } // endif op + + Colp = (PDOSCOL)xp[0]; + Arap = (PARRAY)xp[1]; + Type = Arap->GetResultType(); + + if (Colp->GetResultType() != Type) { + sprintf(g->Message, "BLKFILIN: %s", MSG(VALTYPE_NOMATCH)); + longjmp(g->jumper[g->jump_level], 99); + } else if (Colp->GetValue()->IsCi()) + Arap->SetPrecision(g, 1); // Case insensitive + + Sorted = Colp->IsSorted() > 0; + } // end of BLKFILIN constructor + +/***********************************************************************/ +/* Reset: have the sorted array reset its Bot value to -1 (bottom). 
*/ +/***********************************************************************/ +void BLKFILIN::Reset(PGLOBAL g) + { + Arap->Reset(); +// MakeValueBitmap(); // Does nothing for class BLKFILIN + } // end of Reset + +/***********************************************************************/ +/* Evaluate block filter for a IN operator on a constant array. */ +/* Note: here we need to use the GetValPtrEx function to get a zero */ +/* ended string in case of string argument. This is because the ARRAY */ +/* can have a different width than the char column. */ +/***********************************************************************/ +int BLKFILIN::BlockEval(PGLOBAL g) + { + int n = ((PTDBDOS)Colp->GetTo_Tdb())->GetCurBlk(); + void *minp = Colp->GetMin()->GetValPtrEx(n); + void *maxp = Colp->GetMax()->GetValPtrEx(n); + + Result = Arap->BlockTest(g, Opc, Opm, minp, maxp, Sorted); + return Result; + } // end of BlockEval + +/* ------------------------ Class BLKFILIN2 -------------------------- */ + +/***********************************************************************/ +/* BLKFILIN2 constructor. */ +/* New version that takes care of all operators and modificators. */ +/* It is also ready to handle the case of correlated sub-selects. */ +/***********************************************************************/ +BLKFILIN2::BLKFILIN2(PGLOBAL g, PTDBDOS tdbp, int op, int opm, PXOB *xp) + : BLKFILIN(g, tdbp, op, opm, xp) + { + Nbm = Colp->GetNbm(); + Valp = AllocateValue(g, Colp->GetValue()); + Invert = (Opc == OP_NE || Opc == OP_GE || Opc ==OP_GT); + Bmp = (uint*)PlugSubAlloc(g, NULL, Nbm * sizeof(uint)); + Bxp = (uint*)PlugSubAlloc(g, NULL, Nbm * sizeof(uint)); + MakeValueBitmap(); + } // end of BLKFILIN2 constructor + +/***********************************************************************/ +/* MakeValueBitmap: Set the constant values bit map. It can be void */ +/* if the constant values are not in the column distinct values list. 
*/ +/* The bitmaps are prepared for the EQ, LE, and LT operators and */ +/* takes care of the ALL and ANY modificators. If the operators are */ +/* NE, GE, or GT the modificator is inverted and the result will be. */ +/***********************************************************************/ +void BLKFILIN2::MakeValueBitmap(void) + { + int i, k, n, ndv = Colp->GetNdv(); + bool found, noteq = !(Opc == OP_EQ || Opc == OP_NE); + bool all = (!Invert) ? (Opm == 2) : (Opm != 2); + uint btp; + PVBLK dval = Colp->GetDval(); + + N = -1; + + // Take care of special cases + if (!(n = Arap->GetNval())) { + // Return TRUE for ALL because it means that there are no item that + // does not verify the condition, which is true indeed. + // Return FALSE for ANY because TRUE means that there is at least + // one item that verifies the condition, which is false. + Result = (Opm == 2) ? 2 : -2; + return; + } else if (!noteq && all && n > 1) { + // An item cannot be equal to all different values + // or an item is always unequal to any different values + Result = (Opc == OP_EQ) ? -2 : 2; + return; + } // endif's + + for (i = 0; i < Nbm; i++) + Bmp[i] = Bxp[i] = 0; + + for (k = 0; k < n; k++) { + Arap->GetNthValue(Valp, k); + found = dval->Locate(Valp, i); + N = i / MAXBMP; + btp = 1 << (i % MAXBMP); + + if (found) + Bmp[N] |= btp; + + // For LT and LE if ALL the condition applies to the smallest item + // if ANY it applies to the largest item. In the case of EQ we come + // here only if ANY or if n == 1, so it does applies to the largest. 
+ if ((!k && all) || (k == n - 1 && !all)) { + Bxp[N] = btp - 1; + + if (found && Opc != OP_LT && Opc != OP_GE) + Bxp[N] |= btp; // Found value must be included + + } // endif k, opm + + } // endfor k + + if (noteq) + Bmp[N] = Bxp[N]; + + Void = !Bmp[N]; // There are no good values in the file + + for (i = 0; i < N; i++) { + Bxp[i] = ~0; + + if (noteq) { + Bmp[i] = Bxp[i]; + Void = FALSE; + } // endif noteq + + } // endfor i + + if (!Bmp[N] && !Bxp[N]) { + if (--N < 0) + // All array values are smaller than block values + Result = (Invert) ? 2 : -2; + + } else if (N == Nbm - 1 && (signed)Bmp[N] == (1 << (ndv % MAXBMP)) - 1) { + // Condition will be always TRUE or FALSE for the whole file + Result = (Invert) ? -2 : 2; + N = -1; + } // endif's + + } // end of MakeValueBitmap + +/***********************************************************************/ +/* Evaluate block filter for set operators on a constant array. */ +/* Note: here we need to use the GetValPtrEx function to get a zero */ +/* ended string in case of string argument. This is because the ARRAY */ +/* can have a different width than the char column. 
*/ +/***********************************************************************/ +int BLKFILIN2::BlockEval(PGLOBAL g) + { + if (N < 0) + return Result; // Was set in MakeValueBitmap + + int i, n = ((PTDBDOS)Colp->GetTo_Tdb())->GetCurBlk(); + bool fnd = FALSE, all = TRUE, gt = TRUE; + uint bres; + uint *bkmp = (uint*)Colp->GetBmap()->GetValPtr(n * Nbm); + + // Set result as if Opc were OP_EQ, OP_LT, or OP_LE + // The difference between ALL or ANY was handled in MakeValueBitmap + for (i = 0; i < Nbm; i++) + if (i <= N) { + if ((bres = Bmp[i] & bkmp[i])) + fnd = TRUE; + + if (bres != bkmp[i]) + all = FALSE; + + if (Bxp[i] & bkmp[i]) + gt = FALSE; + + } else if (bkmp[i]) { + all = FALSE; + break; + } // endif's + + if (!fnd) { + if (Void || (Sorted && gt)) + Result = -2; // No more good block in file + else + Result = -1; // No good values in this block + + } else if (all) + Result = 1; // All block values are good + else + Result = 0; // Block contains some good values + + // For OP_NE, OP_GE, and OP_GT the result must be inverted. + switch (Opc) { + case OP_NE: + case OP_GE: + case OP_GT: + Result = -Result; + break; + } // endswitch Opc + + return Result; + } // end of BlockEval + +#if 0 +/***********************************************************************/ +/* BLKFILIN2 constructor. 
*/ +/***********************************************************************/ +BLKFILIN2::BLKFILIN2(PGLOBAL g, PTDBDOS tdbp, int op, int opm, PXOB *xp) + : BLKFILIN(g, tdbp, op, opm, xp) + { + // Currently, bitmap matching is only implemented for the IN operator + if (!(Bitmap = (op == OP_IN || (op == OP_EQ && opm != 2)))) { + Nbm = Colp->GetNbm(); + N = 0; + return; // Revert to standard minmax method + } // endif minmax + + int i, n; + ULONG btp; + PVAL valp = AllocateValue(g, Colp->GetValue()); + PVBLK dval = Colp->GetDval(); + + Nbm = Colp->GetNbm(); + N = -1; + Bmp = (PULONG)PlugSubAlloc(g, NULL, Nbm * sizeof(ULONG)); + Bxp = (PULONG)PlugSubAlloc(g, NULL, Nbm * sizeof(ULONG)); + + for (i = 0; i < Nbm; i++) + Bmp[i] = Bxp[i] = 0; + + for (n = 0; n < Arap->GetNval(); n++) { + Arap->GetNthValue(valp, n); + + if ((i = dval->Find(valp)) >= 0) + Bmp[i / MAXBMP] |= 1 << (i % MAXBMP); + + } // endfor n + + for (i = Nbm - 1; i >= 0; i--) + if (Bmp[i]) { + for (btp = Bmp[i]; btp; btp >>= 1) + Bxp[i] |= btp; + + for (N = i--; i >= 0; i--) + Bxp[i] = ~0; + + break; + } // endif Bmp + + } // end of BLKFILIN2 constructor + +/***********************************************************************/ +/* Evaluate block filter for a IN operator on a constant array. */ +/* Note: here we need to use the GetValPtrEx function to get a zero */ +/* ended string in case of string argument. This is because the ARRAY */ +/* can have a different width than the char column. 
*/ +/***********************************************************************/ +int BLKFILIN2::BlockEval(PGLOBAL g) + { + if (N < 0) + return -2; // IN list contains no good values + + int i, n = ((PTDBDOS)Colp->GetTo_Tdb())->GetCurBlk(); + bool fnd = FALSE, all = TRUE, gt = TRUE; + ULONG bres; + PULONG bkmp = (PULONG)Colp->GetBmap()->GetValPtr(n * Nbm); + + if (Bitmap) { + // For IN operator use the bitmap method + for (i = 0; i < Nbm; i++) + if (i <= N) { + if ((bres = Bmp[i] & bkmp[i])) + fnd = TRUE; + + if (bres != bkmp[i]) + all = FALSE; + + if (Bxp[i] & bkmp[i]) + gt = FALSE; + + } else if (bkmp[i]) { + all = FALSE; + break; + } // endif's + + if (!fnd) { + if (Sorted && gt) + Result = -2; // No more good block in file + else + Result = -1; // No good values in this block + + } else if (all) + Result = 1; // All block values are good + else + Result = 0; // Block contains some good values + + } else { + // For other than IN operators, revert to standard minmax method + int n = 0, ndv = Colp->GetNdv(); + void *minp = NULL; + void *maxp = NULL; + ULONG btp; + PVBLK dval = Colp->GetDval(); + + for (i = 0; i < Nbm; i++) + for (btp = 1; btp && n < ndv; btp <<= 1, n++) + if (btp & bkmp[i]) { + if (!minp) + minp = dval->GetValPtrEx(n); + + maxp = dval->GetValPtrEx(n); + } // endif btp + + Result = Arap->BlockTest(g, Opc, Opm, minp, maxp, Colp->IsSorted()); + } // endif Bitmap + + return Result; + } // end of BlockEval +#endif // 0 + +/* ------------------------ Class BLKSPCIN --------------------------- */ + +/***********************************************************************/ +/* BLKSPCIN constructor. 
*/ +/***********************************************************************/ +BLKSPCIN::BLKSPCIN(PGLOBAL g, PTDBDOS tdbp, int op, int opm, + PXOB *xp, int bsize) + : BLOCKFILTER(tdbp, op) + { + if (op == OP_IN) { + Opc = OP_EQ; + Opm = 1; + } else + Opm = opm; + + Arap = (PARRAY)xp[1]; +#if defined(_DEBUG) + assert (Opm); + assert (Arap->GetResultType() == TYPE_INT); +#endif + Bsize = bsize; + } // end of BLKSPCIN constructor + +/***********************************************************************/ +/* Reset: have the sorted array reset its Bot value to -1 (bottom). */ +/***********************************************************************/ +void BLKSPCIN::Reset(PGLOBAL g) + { + Arap->Reset(); + } // end of Reset + +/***********************************************************************/ +/* Evaluate block filter for a IN operator on a constant array. */ +/***********************************************************************/ +int BLKSPCIN::BlockEval(PGLOBAL g) + { + int n = Tdbp->GetCurBlk(); + int minrow = n * Bsize + 1; // Minimum Rowid value for this block + int maxrow = (n + 1) * Bsize; // Maximum Rowid value for this block + + Result = Arap->BlockTest(g, Opc, Opm, &minrow, &maxrow, TRUE); + return Result; + } // end of BlockEval + +/* ------------------------------------------------------------------- */ + +#if 0 +/***********************************************************************/ +/* Implementation of the BLOCKINDEX class. */ +/***********************************************************************/ +BLOCKINDEX::BLOCKINDEX(PBX nx, PDOSCOL cp, PKXBASE kp) + { + Next = nx; + Tdbp = (cp) ? (PTDBDOS)cp->GetTo_Tdb() : NULL; + Colp = cp; + Kxp = kp; + Type = (cp) ? cp->GetResultType() : TYPE_ERROR; + Sorted = (cp) ? cp->IsSorted() > 0 : FALSE; + Result = 0; + } // end of BLOCKINDEX constructor + +/***********************************************************************/ +/* Reset Bot and Top values of optimized Kindex blocks. 
*/ +/***********************************************************************/ +void BLOCKINDEX::Reset(void) + { + if (Next) + Next->Reset(); + + Kxp->Reset(); + } // end of Reset + +/***********************************************************************/ +/* Evaluate block indexing test. */ +/***********************************************************************/ +int BLOCKINDEX::BlockEval(PGLOBAL g) + { +#if defined(_DEBUG) + assert (Tdbp && Colp); +#endif + int n = Tdbp->GetCurBlk(); + void *minp = Colp->GetMin()->GetValPtr(n); + void *maxp = Colp->GetMax()->GetValPtr(n); + + Result = Kxp->BlockTest(g, minp, maxp, Type, Sorted); + return Result; + } // end of BlockEval + +/***********************************************************************/ +/* Make file output of BLOCKINDEX contents. */ +/***********************************************************************/ +void BLOCKINDEX::Print(PGLOBAL g, FILE *f, UINT n) + { + char m[64]; + + memset(m, ' ', n); // Make margin string + m[n] = '\0'; + + fprintf(f, "%sBLOCKINDEX: at %p next=%p col=%s kxp=%p result=%d\n", + m, this, Next, (Colp) ? Colp->GetName() : "Rowid", Kxp, Result); + + if (Next) + Next->Print(g, f, n); + + } // end of Print + +/***********************************************************************/ +/* Make string output of BLOCKINDEX contents. */ +/***********************************************************************/ +void BLOCKINDEX::Print(PGLOBAL g, char *ps, UINT z) + { + strncat(ps, "BlockIndex(es)", z); + } // end of Print + +/* ------------------------------------------------------------------- */ + +/***********************************************************************/ +/* Implementation of the BLOCKINDX2 class. 
*/ +/***********************************************************************/ +BLOCKINDX2::BLOCKINDX2(PBX nx, PDOSCOL cp, PKXBASE kp) + : BLOCKINDEX(nx, cp, kp) + { + Nbm = Colp->GetNbm(); + Dval = Colp->GetDval(); + Bmap = Colp->GetBmap(); +#if defined(_DEBUG) + assert(Dval && Bmap); +#endif + } // end of BLOCKINDX2 constructor + +/***********************************************************************/ +/* Evaluate block indexing test. */ +/***********************************************************************/ +int BLOCKINDX2::BlockEval(PGLOBAL g) + { + int n = Tdbp->GetCurBlk(); + PUINT bmp = (PUINT)Bmap->GetValPtr(n * Nbm); + + Result = Kxp->BlockTst2(g, Dval, bmp, Nbm, Type, Sorted); + return Result; + } // end of BlockEval + +/* ------------------------------------------------------------------- */ + +/***********************************************************************/ +/* Implementation of the BLKSPCINDX class. */ +/***********************************************************************/ +BLKSPCINDX::BLKSPCINDX(PBX nx, PTDBDOS tp, PKXBASE kp, int bsize) + : BLOCKINDEX(nx, NULL, kp) + { + Tdbp = tp; + Bsize = bsize; + Type = TYPE_INT; + Sorted = TRUE; + } // end of BLKSPCINDX constructor + +/***********************************************************************/ +/* Evaluate block indexing test. 
*/ +/***********************************************************************/ +int BLKSPCINDX::BlockEval(PGLOBAL g) + { + int n = Tdbp->GetCurBlk(); + int minrow = n * Bsize + 1; // Minimum Rowid value for this block + int maxrow = (n + 1) * Bsize; // Maximum Rowid value for this block + + Result = Kxp->BlockTest(g, &minrow, &maxrow, TYPE_INT, TRUE); + return Result; + } // end of BlockEval +#endif // 0 diff --git a/storage/connect/blkfil.h b/storage/connect/blkfil.h new file mode 100644 index 00000000000..00b00139042 --- /dev/null +++ b/storage/connect/blkfil.h @@ -0,0 +1,295 @@ +/*************** BlkFil H Declares Source Code File (.H) ***************/ +/* Name: BLKFIL.H Version 2.1 */ +/* */ +/* (C) Copyright to the author Olivier BERTRAND 2004-2010 */ +/* */ +/* This file contains the block optimization related classes declares */ +/***********************************************************************/ +#ifndef __BLKFIL__ +#define __BLKFIL__ + +typedef class BLOCKFILTER *PBF; +typedef class BLOCKINDEX *PBX; + +/***********************************************************************/ +/* Definition of class BLOCKFILTER. 
*/ +/***********************************************************************/ +class DllExport BLOCKFILTER : public BLOCK { /* Block Filter */ + friend class BLKFILLOG; + public: + // Constructors + BLOCKFILTER(PTDBDOS tdbp, int op); + + // Implementation + int GetResult(void) {return Result;} + bool Correlated(void) {return Correl;} + + // Methods + virtual void Reset(PGLOBAL) = 0; + virtual int BlockEval(PGLOBAL) = 0; + virtual void Print(PGLOBAL g, FILE *f, uint n); + virtual void Print(PGLOBAL g, char *ps, uint z); + + protected: + BLOCKFILTER(void) {} // Standard constructor not to be used + + // Members + PTDBDOS Tdbp; // Owner TDB + bool Correl; // TRUE for correlated subqueries + int Opc; // Comparison operator + int Opm; // Operator modificator + int Result; // Result from evaluation + }; // end of class BLOCKFILTER + +/***********************************************************************/ +/* Definition of class BLKFILLOG (with Op=OP_AND,OP_OR, or OP_NOT) */ +/***********************************************************************/ +class DllExport BLKFILLOG : public BLOCKFILTER { /* Logical Op Block Filter */ + public: + // Constructors + BLKFILLOG(PTDBDOS tdbp, int op, PBF *bfp, int n); + + // Methods + virtual void Reset(PGLOBAL g); + virtual int BlockEval(PGLOBAL g); + + protected: + BLKFILLOG(void) {} // Standard constructor not to be used + + // Members + PBF *Fil; // Points to Block filter args + int N; + }; // end of class BLKFILLOG + +/***********************************************************************/ +/* Definition of class BLKFILARI (with Op=OP_EQ,NE,GT,GE,LT, or LE) */ +/***********************************************************************/ +class DllExport BLKFILARI : public BLOCKFILTER { /* Arithm. 
Op Block Filter */ + public: + // Constructors + BLKFILARI(PGLOBAL g, PTDBDOS tdbp, int op, PXOB *xp); + + // Methods + virtual void Reset(PGLOBAL g); + virtual int BlockEval(PGLOBAL g); + virtual void MakeValueBitmap(void) {} + + protected: + BLKFILARI(void) {} // Standard constructor not to be used + + // Members + PDOSCOL Colp; // Points to column argument + PCOL Cpx; // Point to subquery "constant" column + PVAL Valp; // Points to constant argument Value + bool Sorted; // True if the column is sorted + }; // end of class BLKFILARI + +/***********************************************************************/ +/* Definition of class BLKFILAR2 (with Op=OP_EQ,NE,GT,GE,LT, or LE) */ +/***********************************************************************/ +class DllExport BLKFILAR2 : public BLKFILARI { /* Arithm. Op Block Filter */ + public: + // Constructors + BLKFILAR2(PGLOBAL g, PTDBDOS tdbp, int op, PXOB *xp); + + // Methods + virtual int BlockEval(PGLOBAL g); + virtual void MakeValueBitmap(void); + + protected: + BLKFILAR2(void) {} // Standard constructor not to be used + + // Members + uint Bmp; // The value bitmap used to test blocks + uint Bxp; // Bitmap used when Opc = OP_EQ + }; // end of class BLKFILAR2 + +/***********************************************************************/ +/* Definition of class BLKFILAR2 (with Op=OP_EQ,NE,GT,GE,LT, or LE) */ +/* To be used when the bitmap is an array of ULONG bitmaps; */ +/***********************************************************************/ +class DllExport BLKFILMR2 : public BLKFILARI { /* Arithm. 
Op Block Filter */ + public: + // Constructors + BLKFILMR2(PGLOBAL g, PTDBDOS tdbp, int op, PXOB *xp); + + // Methods + virtual int BlockEval(PGLOBAL g); + virtual void MakeValueBitmap(void); + + protected: + BLKFILMR2(void) {} // Standard constructor not to be used + + // Members + int Nbm; // The number of ULONG bitmaps + int N; // The position of the leftmost ULONG + bool Void; // True if all file blocks can be skipped + uint *Bmp; // The values bitmaps used to test blocks + uint *Bxp; // Bit of values <= max value + }; // end of class BLKFILMR2 + +/***********************************************************************/ +/* Definition of class BLKSPCARI (with Op=OP_EQ,NE,GT,GE,LT, or LE) */ +/***********************************************************************/ +class DllExport BLKSPCARI : public BLOCKFILTER { /* Arithm. Op Block Filter */ + public: + // Constructors + BLKSPCARI(PTDBDOS tdbp, int op, PXOB *xp, int bsize); + + // Methods + virtual void Reset(PGLOBAL g); + virtual int BlockEval(PGLOBAL g); + + protected: + BLKSPCARI(void) {} // Standard constructor not to be used + + // Members + PCOL Cpx; // Point to subquery "constant" column + PVAL Valp; // Points to constant argument Value + int Val; // Constant argument Value + int Bsize; // Table block size + }; // end of class BLKSPCARI + +/***********************************************************************/ +/* Definition of class BLKFILIN (with Op=OP_IN) */ +/***********************************************************************/ +class DllExport BLKFILIN : public BLOCKFILTER { // With array arguments. 
+ public: + // Constructors + BLKFILIN(PGLOBAL g, PTDBDOS tdbp, int op, int opm, PXOB *xp); + + // Methods + virtual void Reset(PGLOBAL g); + virtual int BlockEval(PGLOBAL g); + virtual void MakeValueBitmap(void) {} + + protected: + // Member + PDOSCOL Colp; // Points to column argument + PARRAY Arap; // Points to array argument + bool Sorted; // True if the column is sorted + int Type; // Type of array elements + }; // end of class BLKFILIN + +/***********************************************************************/ +/* Definition of class BLKFILIN2 (with Op=OP_IN) */ +/***********************************************************************/ +class DllExport BLKFILIN2 : public BLKFILIN { // With array arguments. + public: + // Constructors + BLKFILIN2(PGLOBAL g, PTDBDOS tdbp, int op, int opm, PXOB *xp); + + // Methods +//virtual void Reset(PGLOBAL g); + virtual int BlockEval(PGLOBAL g); + virtual void MakeValueBitmap(void); + + protected: + // Member + int Nbm; // The number of ULONG bitmaps + int N; // The position of the leftmost ULONG +//bool Bitmap; // True for IN operator (temporary) + bool Void; // True if all file blocks can be skipped + bool Invert; // True when Result must be inverted + uint *Bmp; // The values bitmaps used to test blocks + uint *Bxp; // Bit of values <= max value + PVAL Valp; // Used while building the bitmaps + }; // end of class BLKFILIN2 + +/***********************************************************************/ +/* Definition of class BLKSPCIN (with Op=OP_IN) Special column */ +/***********************************************************************/ +class DllExport BLKSPCIN : public BLOCKFILTER { // With array arguments. 
+ public: + // Constructors + BLKSPCIN(PGLOBAL g, PTDBDOS tdbp, int op, int opm, PXOB *xp, int bsize); + + // Methods + virtual void Reset(PGLOBAL g); + virtual int BlockEval(PGLOBAL g); + + protected: + // Member + PARRAY Arap; // Points to array argument + int Bsize; // Table block size + }; // end of class BLKSPCIN + +// ---------------- Class used in block indexing testing ---------------- + +#if 0 +/***********************************************************************/ +/* Definition of class BLOCKINDEX. */ +/* Used to test the indexing to joined tables when the foreign key is */ +/* a clustered or sorted column. If the table is joined to several */ +/* tables, blocks will be chained together. */ +/***********************************************************************/ +class DllExport BLOCKINDEX : public BLOCK { /* Indexing Test Block */ + public: + // Constructors + BLOCKINDEX(PBX nx, PDOSCOL cp, PKXBASE kp); + + // Implementation + PBX GetNext(void) {return Next;} + + // Methods + void Reset(void); + virtual int BlockEval(PGLOBAL); + virtual void Print(PGLOBAL g, FILE *f, UINT n); + virtual void Print(PGLOBAL g, char *ps, UINT z); + + protected: + BLOCKINDEX(void) {} // Standard constructor not to be used + + // Members + PBX Next; // To next Index Block + PTDBDOS Tdbp; // To table description block + PDOSCOL Colp; // Clustered foreign key + PKXBASE Kxp; // To Kindex of joined table + bool Sorted; // TRUE if column is sorted + int Type; // Col/Index type + int Result; // Result from evaluation + }; // end of class BLOCKINDEX + +/***********************************************************************/ +/* Definition of class BLOCKINDX2. 
(XDB2) */ +/***********************************************************************/ +class DllExport BLOCKINDX2 : public BLOCKINDEX { /* Indexing Test Block */ + public: + // Constructors + BLOCKINDX2(PBX nx, PDOSCOL cp, PKXBASE kp); + + // Methods + virtual int BlockEval(PGLOBAL); + + protected: + BLOCKINDX2(void) {} // Standard constructor not to be used + + // Members + int Nbm; // The number of ULONG bitmaps + PVBLK Dval; // Array of column distinct values + PVBLK Bmap; // Array of block bitmap values + }; // end of class BLOCKINDX2 + +/***********************************************************************/ +/* Definition of class BLKSPCINDX. */ +/* Used to test the indexing to joined tables when the foreign key is */ +/* the ROWID special column. If the table is joined to several */ +/* tables, blocks will be chained together. */ +/***********************************************************************/ +class DllExport BLKSPCINDX : public BLOCKINDEX { /* Indexing Test Block */ + public: + // Constructors + BLKSPCINDX(PBX nx, PTDBDOS tp, PKXBASE kp, int bsize); + + // Methods + virtual int BlockEval(PGLOBAL); + + protected: + BLKSPCINDX(void) {} // Standard constructor not to be used + + // Members + int Bsize; // Table block size + }; // end of class BLOCKINDEX +#endif // 0 + +#endif // __BLKFIL__ diff --git a/storage/connect/catalog.h b/storage/connect/catalog.h index 6e6cf86fc87..411660431b9 100644 --- a/storage/connect/catalog.h +++ b/storage/connect/catalog.h @@ -43,6 +43,8 @@ typedef struct _colinfo { int Key; int Precision; int Scale; + int Opt; + int Freq; char *Remark; char *Datefmt; char *Fieldfmt; @@ -82,7 +84,7 @@ class DllExport CATALOG { virtual bool TestCond(PGLOBAL g, const char *name, const char *type) {return true;} virtual bool DropTable(PGLOBAL g, PSZ name, bool erase) {return true;} - virtual PTDB GetTable(PGLOBAL g, PTABLE tablep, + virtual PTDB GetTable(PGLOBAL g, PTABLE tablep, MODE mode = MODE_READ, LPCSTR type = NULL) {return NULL;} 
virtual void TableNames(PGLOBAL g, char *buffer, int maxbuf, int info[]) {} diff --git a/storage/connect/colblk.cpp b/storage/connect/colblk.cpp index ffa29bb9821..81ab1ad7245 100644 --- a/storage/connect/colblk.cpp +++ b/storage/connect/colblk.cpp @@ -1,376 +1,419 @@ -/************* Colblk C++ Functions Source Code File (.CPP) ************/ -/* Name: COLBLK.CPP Version 2.0 */ -/* */ -/* (C) Copyright to the author Olivier BERTRAND 1998-2013 */ -/* */ -/* This file contains the COLBLK class functions. */ -/***********************************************************************/ - -/***********************************************************************/ -/* Include relevant MariaDB header file. */ -/***********************************************************************/ -#include "my_global.h" - -/***********************************************************************/ -/* Include required application header files */ -/* global.h is header containing all global Plug declarations. */ -/* plgdbsem.h is header containing the DB applic. declarations. */ -/***********************************************************************/ -#include "global.h" -#include "plgdbsem.h" -#include "tabcol.h" -#include "colblk.h" -#include "xindex.h" -#include "xtable.h" - -extern "C" int trace; - -/***********************************************************************/ -/* COLBLK protected constructor. 
*/ -/***********************************************************************/ -COLBLK::COLBLK(PCOLDEF cdp, PTDB tdbp, int i) - { - Next = NULL; - Index = i; -//Number = 0; - ColUse = 0; - - if ((Cdp = cdp)) { - Name = cdp->Name; - Format = cdp->F; - Long = cdp->Long; - Precision = cdp->Precision; - Buf_Type = cdp->Buf_Type; - ColUse |= cdp->Flags; // Used by CONNECT - Nullable = !!(cdp->Flags & U_NULLS); - Unsigned = !!(cdp->Flags & U_UNSIGNED); - } else { - Name = NULL; - memset(&Format, 0, sizeof(FORMAT)); - Long = 0; - Precision = 0; - Buf_Type = TYPE_ERROR; - Nullable = false; - Unsigned = false; - } // endif cdp - - To_Tdb = tdbp; - Status = BUF_NO; -//Value = NULL; done in XOBJECT constructor - To_Kcol = NULL; - } // end of COLBLK constructor - -/***********************************************************************/ -/* COLBLK constructor used for copying columns. */ -/* tdbp is the pointer to the new table descriptor. */ -/***********************************************************************/ -COLBLK::COLBLK(PCOL col1, PTDB tdbp) - { - PCOL colp; - - // Copy the old column block to the new one - *this = *col1; - Next = NULL; -//To_Orig = col1; - To_Tdb = tdbp; - - if (trace > 1) - htrc(" copying COLBLK %s from %p to %p\n", Name, col1, this); - - if (tdbp) - // Attach the new column to the table block - if (!tdbp->GetColumns()) - tdbp->SetColumns(this); - else { - for (colp = tdbp->GetColumns(); colp->Next; colp = colp->Next) ; - - colp->Next = this; - } // endelse - - } // end of COLBLK copy constructor - -/***********************************************************************/ -/* Reset the column descriptor to non evaluated yet. */ -/***********************************************************************/ -void COLBLK::Reset(void) - { - Status &= ~BUF_READ; - } // end of Reset - -/***********************************************************************/ -/* Compare: compares itself to an (expression) object and returns */ -/* true if it is equivalent. 
*/ -/***********************************************************************/ -bool COLBLK::Compare(PXOB xp) - { - return (this == xp); - } // end of Compare - -/***********************************************************************/ -/* SetFormat: function used to set SELECT output format. */ -/***********************************************************************/ -bool COLBLK::SetFormat(PGLOBAL g, FORMAT& fmt) - { - fmt = Format; - - if (trace > 1) - htrc("COLBLK: %p format=%c(%d,%d)\n", - this, *fmt.Type, fmt.Length, fmt.Prec); - - return false; - } // end of SetFormat - -/***********************************************************************/ -/* Eval: get the column value from the last read record or from a */ -/* matching Index column if there is one. */ -/***********************************************************************/ -bool COLBLK::Eval(PGLOBAL g) - { - if (trace > 1) - htrc("Col Eval: %s status=%.4X\n", Name, Status); - - if (!GetStatus(BUF_READ)) { -// if (To_Tdb->IsNull()) -// Value->Reset(); - if (To_Kcol) - To_Kcol->FillValue(Value); - else - ReadColumn(g); - - AddStatus(BUF_READ); - } // endif - - return false; - } // end of Eval - -/***********************************************************************/ -/* InitValue: prepare a column block for read operation. */ -/* Now we use Format.Length for the len parameter to avoid strings */ -/* to be truncated when converting from string to coded string. */ -/* Added in version 1.5 is the arguments GetScale() and Domain */ -/* in calling AllocateValue. Domain is used for TYPE_DATE only. 
*/ -/***********************************************************************/ -bool COLBLK::InitValue(PGLOBAL g) - { - if (Value) - return false; // Already done - - // Allocate a Value object - if (!(Value = AllocateValue(g, Buf_Type, Precision, - GetScale(), Unsigned, GetDomain()))) - return true; - - AddStatus(BUF_READY); - Value->SetNullable(Nullable); - - if (trace > 1) - htrc(" colp=%p type=%d value=%p coluse=%.4X status=%.4X\n", - this, Buf_Type, Value, ColUse, Status); - - return false; - } // end of InitValue - -/***********************************************************************/ -/* SetBuffer: prepare a column block for write operation. */ -/***********************************************************************/ -bool COLBLK::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check) - { - sprintf(g->Message, MSG(UNDEFINED_AM), "SetBuffer"); - return true; - } // end of SetBuffer - -/***********************************************************************/ -/* GetLength: returns an evaluation of the column string length. */ -/***********************************************************************/ -int COLBLK::GetLengthEx(void) - { - return Long; - } // end of GetLengthEx - -/***********************************************************************/ -/* ReadColumn: what this routine does is to access the last line */ -/* read from the corresponding table, extract from it the field */ -/* corresponding to this column and convert it to buffer type. 
*/ -/***********************************************************************/ -void COLBLK::ReadColumn(PGLOBAL g) - { - sprintf(g->Message, MSG(UNDEFINED_AM), "ReadColumn"); - longjmp(g->jumper[g->jump_level], TYPE_COLBLK); - } // end of ReadColumn - -/***********************************************************************/ -/* WriteColumn: what this routine does is to access the last line */ -/* read from the corresponding table, and rewrite the field */ -/* corresponding to this column from the column buffer and type. */ -/***********************************************************************/ -void COLBLK::WriteColumn(PGLOBAL g) - { - sprintf(g->Message, MSG(UNDEFINED_AM), "WriteColumn"); - longjmp(g->jumper[g->jump_level], TYPE_COLBLK); - } // end of WriteColumn - -/***********************************************************************/ -/* Make file output of a column descriptor block. */ -/***********************************************************************/ -void COLBLK::Print(PGLOBAL g, FILE *f, uint n) - { - char m[64]; - int i; - PCOL colp; - - memset(m, ' ', n); // Make margin string - m[n] = '\0'; - - for (colp = To_Tdb->GetColumns(), i = 1; colp; colp = colp->Next, i++) - if (colp == this) - break; - - fprintf(f, "%sR%dC%d type=%d F=%.2s(%d,%d)", m, To_Tdb->GetTdb_No(), - i, GetAmType(), Format.Type, Format.Length, Format.Prec); - fprintf(f, - " coluse=%04X status=%04X buftyp=%d value=%p name=%s\n", - ColUse, Status, Buf_Type, Value, Name); - } // end of Print - -/***********************************************************************/ -/* Make string output of a column descriptor block. */ -/***********************************************************************/ -void COLBLK::Print(PGLOBAL g, char *ps, uint z) - { - sprintf(ps, "R%d.%s", To_Tdb->GetTdb_No(), Name); - } // end of Print - - -/***********************************************************************/ -/* SPCBLK constructor. 
*/ -/***********************************************************************/ -SPCBLK::SPCBLK(PCOLUMN cp) - : COLBLK((PCOLDEF)NULL, cp->GetTo_Table()->GetTo_Tdb(), 0) - { - Name = (char*)cp->GetName(); - Precision = Long = 0; - Buf_Type = TYPE_ERROR; - } // end of SPCBLK constructor - -/***********************************************************************/ -/* WriteColumn: what this routine does is to access the last line */ -/* read from the corresponding table, and rewrite the field */ -/* corresponding to this column from the column buffer and type. */ -/***********************************************************************/ -void SPCBLK::WriteColumn(PGLOBAL g) - { - sprintf(g->Message, MSG(SPCOL_READONLY), Name); - longjmp(g->jumper[g->jump_level], TYPE_COLBLK); - } // end of WriteColumn - -/***********************************************************************/ -/* RIDBLK constructor for the ROWID special column. */ -/***********************************************************************/ -RIDBLK::RIDBLK(PCOLUMN cp, bool rnm) : SPCBLK(cp) - { - Precision = Long = 10; - Buf_Type = TYPE_INT; - Rnm = rnm; - *Format.Type = 'N'; - Format.Length = 10; - } // end of RIDBLK constructor - -/***********************************************************************/ -/* ReadColumn: what this routine does is to return the ordinal */ -/* number of the current row in the table (if Rnm is true) or in the */ -/* current file (if Rnm is false) the same except for multiple tables.*/ -/***********************************************************************/ -void RIDBLK::ReadColumn(PGLOBAL g) - { - Value->SetValue(To_Tdb->RowNumber(g, Rnm)); - } // end of ReadColumn - -/***********************************************************************/ -/* FIDBLK constructor for the FILEID special column. 
*/ -/***********************************************************************/ -FIDBLK::FIDBLK(PCOLUMN cp) : SPCBLK(cp) - { -//Is_Key = 2; for when the MUL table indexed reading will be implemented. - Precision = Long = _MAX_PATH; - Buf_Type = TYPE_STRING; - *Format.Type = 'C'; - Format.Length = Long; -#if defined(WIN32) - Format.Prec = 1; // Case insensitive -#endif // WIN32 - Constant = (!((PTDBASE)To_Tdb)->GetDef()->GetMultiple() && - To_Tdb->GetAmType() != TYPE_AM_PLG && - To_Tdb->GetAmType() != TYPE_AM_PLM); - Fn = NULL; - } // end of FIDBLK constructor - -/***********************************************************************/ -/* ReadColumn: what this routine does is to return the current */ -/* file ID of the table (can change for Multiple tables). */ -/***********************************************************************/ -void FIDBLK::ReadColumn(PGLOBAL g) - { - if (Fn != ((PTDBASE)To_Tdb)->GetFile(g)) { - char filename[_MAX_PATH]; - - Fn = ((PTDBASE)To_Tdb)->GetFile(g); - PlugSetPath(filename, Fn, ((PTDBASE)To_Tdb)->GetPath()); - Value->SetValue_psz(filename); - } // endif Fn - - } // end of ReadColumn - -/***********************************************************************/ -/* TIDBLK constructor for the TABID special column. */ -/***********************************************************************/ -TIDBLK::TIDBLK(PCOLUMN cp) : SPCBLK(cp) - { -//Is_Key = 2; for when the MUL table indexed reading will be implemented. - Precision = Long = 64; - Buf_Type = TYPE_STRING; - *Format.Type = 'C'; - Format.Length = Long; - Format.Prec = 1; // Case insensitive - Constant = (To_Tdb->GetAmType() != TYPE_AM_TBL); - Tname = NULL; - } // end of TIDBLK constructor - -/***********************************************************************/ -/* ReadColumn: what this routine does is to return the table ID. 
*/ -/***********************************************************************/ -void TIDBLK::ReadColumn(PGLOBAL g) - { - if (Tname == NULL) { - Tname = (char*)To_Tdb->GetName(); - Value->SetValue_psz(Tname); - } // endif Tname - - } // end of ReadColumn - -/***********************************************************************/ -/* SIDBLK constructor for the SERVID special column. */ -/***********************************************************************/ -SIDBLK::SIDBLK(PCOLUMN cp) : SPCBLK(cp) - { -//Is_Key = 2; for when the MUL table indexed reading will be implemented. - Precision = Long = 64; - Buf_Type = TYPE_STRING; - *Format.Type = 'C'; - Format.Length = Long; - Format.Prec = 1; // Case insensitive - Constant = (To_Tdb->GetAmType() != TYPE_AM_TBL); - Sname = NULL; - } // end of TIDBLK constructor - -/***********************************************************************/ -/* ReadColumn: what this routine does is to return the server ID. */ -/***********************************************************************/ -void SIDBLK::ReadColumn(PGLOBAL g) - { -//if (Sname == NULL) { - Sname = (char*)To_Tdb->GetServer(); - Value->SetValue_psz(Sname); -// } // endif Sname - - } // end of ReadColumn - +/************* Colblk C++ Functions Source Code File (.CPP) ************/
+/* Name: COLBLK.CPP Version 2.1 */
+/* */
+/* (C) Copyright to the author Olivier BERTRAND 1998-2014 */
+/* */
+/* This file contains the COLBLK class functions. */
+/***********************************************************************/
+
+/***********************************************************************/
+/* Include relevant MariaDB header file. */
+/***********************************************************************/
+#include "my_global.h"
+
+/***********************************************************************/
+/* Include required application header files */
+/* global.h is header containing all global Plug declarations. */
+/* plgdbsem.h is header containing the DB applic. declarations. */
+/***********************************************************************/
+#include "global.h"
+#include "plgdbsem.h"
+#include "tabcol.h"
+#include "colblk.h"
+#include "xindex.h"
+#include "xtable.h"
+
+extern "C" int trace;
+
+/***********************************************************************/
+/* COLBLK protected constructor. */
+/***********************************************************************/
+COLBLK::COLBLK(PCOLDEF cdp, PTDB tdbp, int i)
+ {
+ Next = NULL;
+ Index = i;
+//Number = 0;
+ ColUse = 0;
+
+ if ((Cdp = cdp)) {
+ Name = cdp->Name;
+ Format = cdp->F;
+ Opt = cdp->Opt;
+ Long = cdp->Long;
+ Precision = cdp->Precision;
+ Freq = cdp->Freq;
+ Buf_Type = cdp->Buf_Type;
+ ColUse |= cdp->Flags; // Used by CONNECT
+ Nullable = !!(cdp->Flags & U_NULLS);
+ Unsigned = !!(cdp->Flags & U_UNSIGNED);
+ } else {
+ Name = NULL;
+ memset(&Format, 0, sizeof(FORMAT));
+ Opt = 0;
+ Long = 0;
+ Precision = 0;
+ Freq = 0;
+ Buf_Type = TYPE_ERROR;
+ Nullable = false;
+ Unsigned = false;
+ } // endif cdp
+
+ To_Tdb = tdbp;
+ Status = BUF_NO;
+//Value = NULL; done in XOBJECT constructor
+ To_Kcol = NULL;
+ } // end of COLBLK constructor
+
+/***********************************************************************/
+/* COLBLK constructor used for copying columns. */
+/* tdbp is the pointer to the new table descriptor. */
+/***********************************************************************/
+COLBLK::COLBLK(PCOL col1, PTDB tdbp)
+ {
+ PCOL colp;
+
+ // Copy the old column block to the new one
+ *this = *col1;
+ Next = NULL;
+//To_Orig = col1;
+ To_Tdb = tdbp;
+
+ if (trace > 1)
+ htrc(" copying COLBLK %s from %p to %p\n", Name, col1, this);
+
+ if (tdbp)
+ // Attach the new column to the table block
+ if (!tdbp->GetColumns())
+ tdbp->SetColumns(this);
+ else {
+ for (colp = tdbp->GetColumns(); colp->Next; colp = colp->Next) ;
+
+ colp->Next = this;
+ } // endelse
+
+ } // end of COLBLK copy constructor
+
+/***********************************************************************/
+/* Reset the column descriptor to non evaluated yet. */
+/***********************************************************************/
+void COLBLK::Reset(void)
+ {
+ Status &= ~BUF_READ;
+ } // end of Reset
+
+/***********************************************************************/
+/* Compare: compares itself to an (expression) object and returns */
+/* true if it is equivalent. */
+/***********************************************************************/
+bool COLBLK::Compare(PXOB xp)
+ {
+ return (this == xp);
+ } // end of Compare
+
+/***********************************************************************/
+/* SetFormat: function used to set SELECT output format. */
+/***********************************************************************/
+bool COLBLK::SetFormat(PGLOBAL g, FORMAT& fmt)
+ {
+ fmt = Format;
+
+ if (trace > 1)
+ htrc("COLBLK: %p format=%c(%d,%d)\n",
+ this, *fmt.Type, fmt.Length, fmt.Prec);
+
+ return false;
+ } // end of SetFormat
+
+/***********************************************************************/
+/* Eval: get the column value from the last read record or from a */
+/* matching Index column if there is one. */
+/***********************************************************************/
+bool COLBLK::Eval(PGLOBAL g)
+ {
+ if (trace > 1)
+ htrc("Col Eval: %s status=%.4X\n", Name, Status);
+
+ if (!GetStatus(BUF_READ)) {
+// if (To_Tdb->IsNull())
+// Value->Reset();
+ if (To_Kcol)
+ To_Kcol->FillValue(Value);
+ else
+ ReadColumn(g);
+
+ AddStatus(BUF_READ);
+ } // endif
+
+ return false;
+ } // end of Eval
+
+/***********************************************************************/
+/* InitValue: prepare a column block for read operation. */
+/* Now we use Format.Length for the len parameter to avoid strings */
+/* to be truncated when converting from string to coded string. */
+/* Added in version 1.5 is the arguments GetScale() and Domain */
+/* in calling AllocateValue. Domain is used for TYPE_DATE only. */
+/***********************************************************************/
+bool COLBLK::InitValue(PGLOBAL g)
+ {
+ if (Value)
+ return false; // Already done
+
+ // Allocate a Value object
+ if (!(Value = AllocateValue(g, Buf_Type, Precision,
+ GetScale(), Unsigned, GetDomain())))
+ return true;
+
+ AddStatus(BUF_READY);
+ Value->SetNullable(Nullable);
+
+ if (trace > 1)
+ htrc(" colp=%p type=%d value=%p coluse=%.4X status=%.4X\n",
+ this, Buf_Type, Value, ColUse, Status);
+
+ return false;
+ } // end of InitValue
+
+/***********************************************************************/
+/* SetBuffer: prepare a column block for write operation. */
+/***********************************************************************/
+bool COLBLK::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check)
+ {
+ sprintf(g->Message, MSG(UNDEFINED_AM), "SetBuffer");
+ return true;
+ } // end of SetBuffer
+
+/***********************************************************************/
+/* GetLength: returns an evaluation of the column string length. */
+/***********************************************************************/
+int COLBLK::GetLengthEx(void)
+ {
+ return Long;
+ } // end of GetLengthEx
+
+/***********************************************************************/
+/* ReadColumn: what this routine does is to access the last line */
+/* read from the corresponding table, extract from it the field */
+/* corresponding to this column and convert it to buffer type. */
+/***********************************************************************/
+void COLBLK::ReadColumn(PGLOBAL g)
+ {
+ sprintf(g->Message, MSG(UNDEFINED_AM), "ReadColumn");
+ longjmp(g->jumper[g->jump_level], TYPE_COLBLK);
+ } // end of ReadColumn
+
+/***********************************************************************/
+/* WriteColumn: what this routine does is to access the last line */
+/* read from the corresponding table, and rewrite the field */
+/* corresponding to this column from the column buffer and type. */
+/***********************************************************************/
+void COLBLK::WriteColumn(PGLOBAL g)
+ {
+ sprintf(g->Message, MSG(UNDEFINED_AM), "WriteColumn");
+ longjmp(g->jumper[g->jump_level], TYPE_COLBLK);
+ } // end of WriteColumn
+
+/***********************************************************************/
+/* Make file output of a column descriptor block. */
+/***********************************************************************/
+void COLBLK::Print(PGLOBAL g, FILE *f, uint n)
+ {
+ char m[64];
+ int i;
+ PCOL colp;
+
+ memset(m, ' ', n); // Make margin string
+ m[n] = '\0';
+
+ for (colp = To_Tdb->GetColumns(), i = 1; colp; colp = colp->Next, i++)
+ if (colp == this)
+ break;
+
+ fprintf(f, "%sR%dC%d type=%d F=%.2s(%d,%d)", m, To_Tdb->GetTdb_No(),
+ i, GetAmType(), Format.Type, Format.Length, Format.Prec);
+ fprintf(f,
+ " coluse=%04X status=%04X buftyp=%d value=%p name=%s\n",
+ ColUse, Status, Buf_Type, Value, Name);
+ } // end of Print
+
+/***********************************************************************/
+/* Make string output of a column descriptor block. */
+/***********************************************************************/
+void COLBLK::Print(PGLOBAL g, char *ps, uint z)
+ {
+ sprintf(ps, "R%d.%s", To_Tdb->GetTdb_No(), Name);
+ } // end of Print
+
+
+/***********************************************************************/
+/* SPCBLK constructor. */
+/***********************************************************************/
+SPCBLK::SPCBLK(PCOLUMN cp)
+ : COLBLK((PCOLDEF)NULL, cp->GetTo_Table()->GetTo_Tdb(), 0)
+ {
+ Name = (char*)cp->GetName();
+ Precision = Long = 0;
+ Buf_Type = TYPE_ERROR;
+ } // end of SPCBLK constructor
+
+/***********************************************************************/
+/* WriteColumn: what this routine does is to access the last line */
+/* read from the corresponding table, and rewrite the field */
+/* corresponding to this column from the column buffer and type. */
+/***********************************************************************/
+void SPCBLK::WriteColumn(PGLOBAL g)
+ {
+ sprintf(g->Message, MSG(SPCOL_READONLY), Name);
+ longjmp(g->jumper[g->jump_level], TYPE_COLBLK);
+ } // end of WriteColumn
+
+/***********************************************************************/
+/* RIDBLK constructor for the ROWID special column. */
+/***********************************************************************/
+RIDBLK::RIDBLK(PCOLUMN cp, bool rnm) : SPCBLK(cp)
+ {
+ Precision = Long = 10;
+ Buf_Type = TYPE_INT;
+ Rnm = rnm;
+ *Format.Type = 'N';
+ Format.Length = 10;
+ } // end of RIDBLK constructor
+
+/***********************************************************************/
+/* ReadColumn: what this routine does is to return the ordinal */
+/* number of the current row in the table (if Rnm is true) or in the */
+/* current file (if Rnm is false) the same except for multiple tables.*/
+/***********************************************************************/
+void RIDBLK::ReadColumn(PGLOBAL g)
+ {
+ Value->SetValue(To_Tdb->RowNumber(g, Rnm));
+ } // end of ReadColumn
+
+/***********************************************************************/
+/* FIDBLK constructor for the FILEID special column. */
+/***********************************************************************/
+FIDBLK::FIDBLK(PCOLUMN cp, OPVAL op) : SPCBLK(cp), Op(op)
+ {
+//Is_Key = 2; for when the MUL table indexed reading will be implemented.
+ Precision = Long = _MAX_PATH;
+ Buf_Type = TYPE_STRING;
+ *Format.Type = 'C';
+ Format.Length = Long;
+#if defined(WIN32)
+ Format.Prec = 1; // Case insensitive
+#endif // WIN32
+ Constant = (!((PTDBASE)To_Tdb)->GetDef()->GetMultiple() &&
+ To_Tdb->GetAmType() != TYPE_AM_PLG &&
+ To_Tdb->GetAmType() != TYPE_AM_PLM);
+ Fn = NULL;
+ } // end of FIDBLK constructor
+
+/***********************************************************************/
+/* ReadColumn: what this routine does is to return the current */
+/* file ID of the table (can change for Multiple tables). */
+/***********************************************************************/
+void FIDBLK::ReadColumn(PGLOBAL g)
+ {
+ if (Fn != ((PTDBASE)To_Tdb)->GetFile(g)) {
+ char filename[_MAX_PATH];
+
+ Fn = ((PTDBASE)To_Tdb)->GetFile(g);
+ PlugSetPath(filename, Fn, ((PTDBASE)To_Tdb)->GetPath());
+
+ if (Op != OP_XX) {
+ char buff[_MAX_PATH];
+
+ Value->SetValue_psz(ExtractFromPath(g, buff, filename, Op));
+ } else
+ Value->SetValue_psz(filename);
+
+ } // endif Fn
+
+ } // end of ReadColumn
+
+/***********************************************************************/
+/* TIDBLK constructor for the TABID special column. */
+/***********************************************************************/
+TIDBLK::TIDBLK(PCOLUMN cp) : SPCBLK(cp)
+ {
+//Is_Key = 2; for when the MUL table indexed reading will be implemented.
+ Precision = Long = 64;
+ Buf_Type = TYPE_STRING;
+ *Format.Type = 'C';
+ Format.Length = Long;
+ Format.Prec = 1; // Case insensitive
+ Constant = (To_Tdb->GetAmType() != TYPE_AM_TBL);
+ Tname = NULL;
+ } // end of TIDBLK constructor
+
+/***********************************************************************/
+/* ReadColumn: what this routine does is to return the table ID. */
+/***********************************************************************/
+void TIDBLK::ReadColumn(PGLOBAL g)
+ {
+ if (Tname == NULL) {
+ Tname = (char*)To_Tdb->GetName();
+ Value->SetValue_psz(Tname);
+ } // endif Tname
+
+ } // end of ReadColumn
+
+/***********************************************************************/
+/* PRTBLK constructor for the PARTID special column. */
+/***********************************************************************/
+PRTBLK::PRTBLK(PCOLUMN cp) : SPCBLK(cp)
+ {
+//Is_Key = 2; for when the MUL table indexed reading will be implemented.
+ Precision = Long = 64;
+ Buf_Type = TYPE_STRING;
+ *Format.Type = 'C';
+ Format.Length = Long;
+ Format.Prec = 1; // Case insensitive
+ Constant = true; // TODO: check whether this is true indeed
+ Pname = NULL;
+ } // end of PRTBLK constructor
+
+/***********************************************************************/
+/* ReadColumn: what this routine does is to return the partition ID. */
+/***********************************************************************/
+void PRTBLK::ReadColumn(PGLOBAL g)
+ {
+ if (Pname == NULL) {
+ char *p;
+ PTDBASE tdbp = (PTDBASE)To_Tdb;
+
+ Pname = tdbp->GetDef()->GetStringCatInfo(g, "partname", "?");
+
+ p = strrchr(Pname, '#');
+ Value->SetValue_psz((p) ? p + 1 : Pname);
+ } // endif Pname
+
+ } // end of ReadColumn
+
+/***********************************************************************/
+/* SIDBLK constructor for the SERVID special column. */
+/***********************************************************************/
+SIDBLK::SIDBLK(PCOLUMN cp) : SPCBLK(cp)
+ {
+//Is_Key = 2; for when the MUL table indexed reading will be implemented.
+ Precision = Long = 64;
+ Buf_Type = TYPE_STRING;
+ *Format.Type = 'C';
+ Format.Length = Long;
+ Format.Prec = 1; // Case insensitive
+ Constant = (To_Tdb->GetAmType() != TYPE_AM_TBL);
+ Sname = NULL;
+ } // end of TIDBLK constructor
+
+/***********************************************************************/
+/* ReadColumn: what this routine does is to return the server ID. */
+/***********************************************************************/
+void SIDBLK::ReadColumn(PGLOBAL g)
+ {
+//if (Sname == NULL) {
+ Sname = (char*)To_Tdb->GetServer();
+ Value->SetValue_psz(Sname);
+// } // endif Sname
+
+ } // end of ReadColumn
+
diff --git a/storage/connect/colblk.h b/storage/connect/colblk.h index a340ee4450a..5e8dc77ff69 100644 --- a/storage/connect/colblk.h +++ b/storage/connect/colblk.h @@ -36,10 +36,13 @@ class DllExport COLBLK : public XOBJECT { virtual int GetAmType() {return TYPE_AM_ERROR;} virtual void SetOk(void) {Status |= BUF_EMPTY;} virtual PTDB GetTo_Tdb(void) {return To_Tdb;} + virtual int GetClustered(void) {return 0;} + virtual int IsClustered(void) {return FALSE;} PCOL GetNext(void) {return Next;} PSZ GetName(void) {return Name;} int GetIndex(void) {return Index;} ushort GetColUse(void) {return ColUse;} + int GetOpt(void) {return Opt;} ushort GetColUse(ushort u) {return (ColUse & u);} ushort GetStatus(void) {return Status;} ushort GetStatus(ushort u) {return (Status & u);} @@ -48,17 +51,18 @@ class DllExport COLBLK : public XOBJECT { void AddColUse(ushort u) {ColUse |= u;} void AddStatus(ushort u) {Status |= u;} void SetNext(PCOL cp) {Next = cp;} + PXCOL GetKcol(void) {return To_Kcol;} void SetKcol(PXCOL kcp) {To_Kcol = kcp;} PCOLDEF GetCdp(void) {return Cdp;} PSZ GetDomain(void) {return (Cdp) ? Cdp->Decode : NULL;} PSZ GetDesc(void) {return (Cdp) ? Cdp->Desc : NULL;} PSZ GetFmt(void) {return (Cdp) ? 
Cdp->Fmt : NULL;} bool IsUnsigned(void) {return Unsigned;} - bool IsNullable(void) {return Nullable;} bool IsVirtual(void) {return Cdp->IsVirtual();} + bool IsNullable(void) {return Nullable;} void SetNullable(bool b) {Nullable = b;} - - // Methods + + // Methods virtual void Reset(void); virtual bool Compare(PXOB xp); virtual bool SetFormat(PGLOBAL, FORMAT&); @@ -70,6 +74,7 @@ class DllExport COLBLK : public XOBJECT { virtual void WriteColumn(PGLOBAL g); virtual void Print(PGLOBAL g, FILE *, uint); virtual void Print(PGLOBAL g, char *, uint); + virtual bool VarSize(void) {return false;} bool InitValue(PGLOBAL g); protected: @@ -82,9 +87,11 @@ class DllExport COLBLK : public XOBJECT { bool Nullable; // True if nullable bool Unsigned; // True if unsigned int Index; // Column number in table + int Opt; // Cluster/sort information int Buf_Type; // Data type int Long; // Internal length in table int Precision; // Column length (as for ODBC) + int Freq; // Evaluated ceiling of distinct values FORMAT Format; // Output format ushort ColUse; // Column usage ushort Status; // Column read status @@ -100,7 +107,7 @@ class DllExport SPCBLK : public COLBLK { // Implementation virtual int GetAmType(void) = 0; - virtual bool GetRnm(void) {return false;} + virtual bool GetRnm(void) {return false;} // Methods virtual bool IsSpecial(void) {return true;} @@ -122,7 +129,7 @@ class DllExport RIDBLK : public SPCBLK { // Implementation virtual int GetAmType(void) {return TYPE_AM_ROWID;} - virtual bool GetRnm(void) {return Rnm;} + virtual bool GetRnm(void) {return Rnm;} // Methods virtual void ReadColumn(PGLOBAL g); @@ -137,7 +144,7 @@ class DllExport RIDBLK : public SPCBLK { class DllExport FIDBLK : public SPCBLK { public: // Constructor - FIDBLK(PCOLUMN cp); + FIDBLK(PCOLUMN cp, OPVAL op); // Implementation virtual int GetAmType(void) {return TYPE_AM_FILID;} @@ -147,7 +154,8 @@ class DllExport FIDBLK : public SPCBLK { virtual void ReadColumn(PGLOBAL g); protected: - PSZ Fn; // The 
current To_File of the table + PSZ Fn; // The current To_File of the table + OPVAL Op; // The file part operator }; // end of class FIDBLK /***********************************************************************/ @@ -174,6 +182,29 @@ class DllExport TIDBLK : public SPCBLK { }; // end of class TIDBLK /***********************************************************************/ +/* Class PRTBLK: PARTID special column descriptor. */ +/***********************************************************************/ +class DllExport PRTBLK : public SPCBLK { + public: + // Constructor + PRTBLK(PCOLUMN cp); + + // Implementation + virtual int GetAmType(void) {return TYPE_AM_PRTID;} + + // Methods + virtual void Reset(void) {} // This is a pseudo constant column + virtual void ReadColumn(PGLOBAL g); + + protected: + // Default constructor not to be used + PRTBLK(void) {} + + // Members + PSZ Pname; // The current partition name + }; // end of class PRTBLK + +/***********************************************************************/ /* Class SIDBLK: SERVID special column descriptor. */ /***********************************************************************/ class DllExport SIDBLK : public SPCBLK { diff --git a/storage/connect/connect.cc b/storage/connect/connect.cc index 2d8aeb8b5f4..e495a0f62b9 100644 --- a/storage/connect/connect.cc +++ b/storage/connect/connect.cc @@ -57,7 +57,7 @@ extern "C" int trace; /* Routines called internally by semantic routines. */ /***********************************************************************/ void CntEndDB(PGLOBAL); -RCODE EvalColumns(PGLOBAL g, PTDB tdbp); +RCODE EvalColumns(PGLOBAL g, PTDB tdbp, bool mrr= false); /***********************************************************************/ /* MySQL routines called externally by semantic routines. 
*/ @@ -142,7 +142,7 @@ bool CntCheckDB(PGLOBAL g, PHC handler, const char *pathname) return true; ((MYCAT *)dbuserp->Catalog)->SetDataPath(g, pathname); - dbuserp->UseTemp= TMP_YES; // Must use temporary file + dbuserp->UseTemp= TMP_AUTO; /*********************************************************************/ /* All is correct. */ @@ -167,7 +167,13 @@ bool CntInfo(PGLOBAL g, PTDB tp, PXF info) if (tdbp) { b= tdbp->GetFtype() != RECFM_NAF; info->data_file_length= (b) ? (ulonglong)tdbp->GetFileLength(g) : 0; - info->records= (unsigned)tdbp->GetMaxSize(g); + + if (!b || info->data_file_length) + info->records= (unsigned)tdbp->Cardinality(g); +// info->records= (unsigned)tdbp->GetMaxSize(g); + else + info->records= 0; + // info->mean_rec_length= tdbp->GetLrecl(); info->mean_rec_length= 0; info->data_file_name= (b) ? tdbp->GetFile(g) : NULL; @@ -343,12 +349,12 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2, //tdbp->SetMode(mode); - if (del && ((PTDBASE)tdbp)->GetFtype() != RECFM_NAF) { + if (del/* && ((PTDBASE)tdbp)->GetFtype() != RECFM_NAF*/) { // To avoid erasing the table when doing a partial delete // make a fake Next - PDOSDEF ddp= new(g) DOSDEF; - PTDB tp= new(g) TDBDOS(ddp, NULL); - tdbp->SetNext(tp); +// PDOSDEF ddp= new(g) DOSDEF; +// PTDB tp= new(g) TDBDOS(ddp, NULL); + tdbp->SetNext((PTDB)1); dup->Check &= ~CHK_DELETE; } // endif del @@ -387,7 +393,7 @@ bool CntRewindTable(PGLOBAL g, PTDB tdbp) /***********************************************************************/ /* Evaluate all columns after a record is read. 
*/ /***********************************************************************/ -RCODE EvalColumns(PGLOBAL g, PTDB tdbp) +RCODE EvalColumns(PGLOBAL g, PTDB tdbp, bool mrr) { RCODE rc= RC_OK; PCOL colp; @@ -415,7 +421,7 @@ RCODE EvalColumns(PGLOBAL g, PTDB tdbp) colp->Reset(); // Virtual columns are computed by MariaDB - if (!colp->GetColUse(U_VIRTUAL)) + if (!colp->GetColUse(U_VIRTUAL) && (!mrr || colp->GetKcol())) if (colp->Eval(g)) rc= RC_FX; @@ -439,8 +445,8 @@ RCODE CntReadNext(PGLOBAL g, PTDB tdbp) // Reading sequencially an indexed table. This happens after the // handler function records_in_range was called and MySQL decides // to quit using the index (!!!) Drop the index. - for (PCOL colp= tdbp->GetColumns(); colp; colp= colp->GetNext()) - colp->SetKcol(NULL); +// for (PCOL colp= tdbp->GetColumns(); colp; colp= colp->GetNext()) +// colp->SetKcol(NULL); ((PTDBASE)tdbp)->ResetKindex(g, NULL); } // endif index @@ -456,7 +462,12 @@ RCODE CntReadNext(PGLOBAL g, PTDB tdbp) goto err; } // endif rc - while ((rc= (RCODE)tdbp->ReadDB(g)) == RC_NF) ; + do { + if ((rc= (RCODE)tdbp->ReadDB(g)) == RC_OK) + if (!ApplyFilter(g, tdbp->GetFilter())) + rc= RC_NF; + + } while (rc == RC_NF); err: g->jump_level--; @@ -529,7 +540,7 @@ RCODE CntDeleteRow(PGLOBAL g, PTDB tdbp, bool all) if (((PTDBASE)tdbp)->GetDef()->Indexable() && all) ((PTDBDOS)tdbp)->Cardinal= 0; - + // Return result code from delete operation // Note: if all, this call will be done when closing the table rc= (RCODE)tdbp->DeleteDB(g, (all) ? RC_FX : RC_OK); @@ -539,16 +550,23 @@ RCODE CntDeleteRow(PGLOBAL g, PTDB tdbp, bool all) /***********************************************************************/ /* CLOSETAB: Close a table. 
*/ /***********************************************************************/ -int CntCloseTable(PGLOBAL g, PTDB tdbp) +int CntCloseTable(PGLOBAL g, PTDB tdbp, bool nox, bool abort) { int rc= RC_OK; TDBDOX *tbxp= NULL; - if (!tdbp || tdbp->GetUse() != USE_OPEN) + if (!tdbp) return rc; // Nothing to do + else if (tdbp->GetUse() != USE_OPEN) { + if (tdbp->GetAmType() == TYPE_AM_XML) + tdbp->CloseDB(g); // Opened by GetMaxSize + + return rc; + } // endif !USE_OPEN if (trace) - printf("CntCloseTable: tdbp=%p mode=%d\n", tdbp, tdbp->GetMode()); + printf("CntCloseTable: tdbp=%p mode=%d nox=%d abort=%d\n", + tdbp, tdbp->GetMode(), nox, abort); if (tdbp->GetMode() == MODE_DELETE && tdbp->GetUse() == USE_OPEN) rc= tdbp->DeleteDB(g, RC_EF); // Specific A.M. delete routine @@ -561,14 +579,16 @@ int CntCloseTable(PGLOBAL g, PTDB tdbp) } // endif if ((rc = setjmp(g->jumper[++g->jump_level])) != 0) { + rc= RC_FX; g->jump_level--; goto err; } // endif // This will close the table file(s) and also finalize write // operations such as Insert, Update, or Delete. 
+ tdbp->SetAbort(abort); tdbp->CloseDB(g); - + tdbp->SetAbort(false); g->jump_level--; if (trace > 1) @@ -577,17 +597,18 @@ int CntCloseTable(PGLOBAL g, PTDB tdbp) //if (!((PTDBDOX)tdbp)->GetModified()) // return 0; - if (tdbp->GetMode() == MODE_READ || tdbp->GetMode() == MODE_ANY) + if (nox || tdbp->GetMode() == MODE_READ || tdbp->GetMode() == MODE_ANY) return 0; if (trace > 1) - printf("About to reset indexes\n"); + printf("About to reset opt\n"); // Make all the eventual indexes tbxp= (TDBDOX*)tdbp; tbxp->ResetKindex(g, NULL); tbxp->To_Key_Col= NULL; - rc= tbxp->ResetTableOpt(g, ((PTDBASE)tdbp)->GetDef()->Indexable() == 1); + rc= tbxp->ResetTableOpt(g, true, + ((PTDBASE)tdbp)->GetDef()->Indexable() == 1); err: if (trace > 1) @@ -603,15 +624,8 @@ int CntCloseTable(PGLOBAL g, PTDB tdbp) /***********************************************************************/ int CntIndexInit(PGLOBAL g, PTDB ptdb, int id) { - int k; - PCOL colp; - PVAL valp; - PKXBASE xp; - PXLOAD pxp; PIXDEF xdp; - XKPDEF *kdp; PTDBDOX tdbp; - PCOLDEF cdp; DOXDEF *dfp; if (!ptdb) @@ -650,63 +664,27 @@ int CntIndexInit(PGLOBAL g, PTDB ptdb, int id) return 0; } // endif xdp - // Allocate the key columns definition block - tdbp->Knum= xdp->GetNparts(); - tdbp->To_Key_Col= (PCOL*)PlugSubAlloc(g, NULL, tdbp->Knum * sizeof(PCOL)); - - // Get the key column description list - for (k= 0, kdp= (XKPDEF*)xdp->GetToKeyParts(); kdp; kdp= (XKPDEF*)kdp->Next) - if (!(colp= tdbp->ColDB(g, kdp->Name, 0)) || colp->InitValue(g)) { - sprintf(g->Message, "Wrong column %s", kdp->Name); - return 0; - } else - tdbp->To_Key_Col[k++]= colp; - -#if defined(_DEBUG) - if (k != tdbp->Knum) { - sprintf(g->Message, "Key part number mismatch for %s", - xdp->GetName()); - return 0; - } // endif k -#endif // _DEBUG - - // Allocate the pseudo constants that will contain the key values - tdbp->To_Link= (PXOB*)PlugSubAlloc(g, NULL, tdbp->Knum * sizeof(PXOB)); - - for (k= 0, kdp= (XKPDEF*)xdp->GetToKeyParts(); - kdp; k++, kdp= 
(XKPDEF*)kdp->Next) { - cdp= tdbp->Key(k)->GetCdp(); - valp= AllocateValue(g, cdp->GetType(), cdp->GetLength()); - tdbp->To_Link[k]= new(g) CONSTANT(valp); - } // endfor k - - // Make the index on xdp - if (!xdp->IsAuto()) { - if (dfp->Huge) - pxp= new(g) XHUGE; - else - pxp= new(g) XFILE; - - if (tdbp->Knum == 1) // Single index - xp= new(g) XINDXS(tdbp, xdp, pxp, tdbp->To_Key_Col, tdbp->To_Link); - else // Multi-Column index - xp= new(g) XINDEX(tdbp, xdp, pxp, tdbp->To_Key_Col, tdbp->To_Link); - - } else // Column contains same values as ROWID - xp= new(g) XXROW(tdbp); - - if (xp->Init(g)) +#if 0 + if (xdp->IsDynamic()) { + // This is a dynamically created index (KINDEX) + // It should not be created now, if called by index range + tdbp->SetXdp(xdp); + return (xdp->IsUnique()) ? 1 : 2; + } // endif dynamic +#endif // 0 + + // Static indexes must be initialized now for records_in_range + if (tdbp->InitialyzeIndex(g, xdp)) return 0; - tdbp->To_Kindex= xp; - return (xp->IsMul()) ? 2 : 1; + return (tdbp->To_Kindex->IsMul()) ? 2 : 1; } // end of CntIndexInit /***********************************************************************/ /* IndexRead: fetch a record having the index value. 
*/ /***********************************************************************/ RCODE CntIndexRead(PGLOBAL g, PTDB ptdb, OPVAL op, - const void *key, int len) + const void *key, int len, bool mrr) { char *kp= (char*)key; int n, x; @@ -737,18 +715,29 @@ RCODE CntIndexRead(PGLOBAL g, PTDB ptdb, OPVAL op, // Set reference values and index operator if (!tdbp->To_Link || !tdbp->To_Kindex) { - sprintf(g->Message, "Index not initialized for table %s", tdbp->Name); - return RC_FX; - } else - xbp= (XXBASE*)tdbp->To_Kindex; +// if (!tdbp->To_Xdp) { + sprintf(g->Message, "Index not initialized for table %s", tdbp->Name); + return RC_FX; +#if 0 + } // endif !To_Xdp + // Now it's time to make the dynamic index + if (tdbp->InitialyzeIndex(g, NULL)) { + sprintf(g->Message, "Fail to make dynamic index %s", + tdbp->To_Xdp->GetName()); + return RC_FX; + } // endif MakeDynamicIndex +#endif // 0 + } // endif !To_Kindex + + xbp= (XXBASE*)tdbp->To_Kindex; if (key) { for (n= 0; n < tdbp->Knum; n++) { colp= (PCOL)tdbp->To_Key_Col[n]; - + if (colp->GetColUse(U_NULLS)) kp++; // Skip null byte - + valp= tdbp->To_Link[n]->GetValue(); if (!valp->IsTypeNum()) { @@ -774,7 +763,7 @@ RCODE CntIndexRead(PGLOBAL g, PTDB ptdb, OPVAL op, valp->SetBinValue((void*)kp); kp+= valp->GetClen(); - + if (len == kp - (char*)key) { n++; break; @@ -793,7 +782,7 @@ RCODE CntIndexRead(PGLOBAL g, PTDB ptdb, OPVAL op, rnd: if ((rc= (RCODE)ptdb->ReadDB(g)) == RC_OK) - rc= EvalColumns(g, ptdb); + rc= EvalColumns(g, ptdb, mrr); return rc; } // end of CntIndexRead @@ -828,28 +817,32 @@ int CntIndexRange(PGLOBAL g, PTDB ptdb, const uchar* *key, uint *len, } else tdbp= (PTDBDOX)ptdb; - if (!tdbp->To_Link || !tdbp->To_Kindex) { - sprintf(g->Message, "Index not initialized for table %s", tdbp->Name); - DBUG_PRINT("Range", ("%s", g->Message)); - return -1; + if (!tdbp->To_Kindex || !tdbp->To_Link) { + if (!tdbp->To_Xdp) { + sprintf(g->Message, "Index not initialized for table %s", tdbp->Name); + DBUG_PRINT("Range", ("%s", 
g->Message)); + return -1; + } else // Dynamic index + return tdbp->To_Xdp->GetMaxSame(); // TODO a better estimate + } else xbp= (XXBASE*)tdbp->To_Kindex; for (b= false, i= 0; i < 2; i++) { p= kp= key[i]; - + if (kp) { for (n= 0; n < tdbp->Knum; n++) { if (kmap[i] & (key_part_map)(1 << n)) { if (b == true) // Cannot do indexing with missing intermediate key - return -1; + return -1; colp= (PCOL)tdbp->To_Key_Col[n]; - + if (colp->GetColUse(U_NULLS)) p++; // Skip null byte ??? - + valp= tdbp->To_Link[n]->GetValue(); if (!valp->IsTypeNum()) { @@ -862,7 +855,7 @@ int CntIndexRange(PGLOBAL g, PTDB ptdb, const uchar* *key, uint *len, if (rcb) { if (tdbp->RowNumber(g)) - sprintf(g->Message, + sprintf(g->Message, "Out of range value for column %s at row %d", colp->GetName(), tdbp->RowNumber(g)); else @@ -881,7 +874,7 @@ int CntIndexRange(PGLOBAL g, PTDB ptdb, const uchar* *key, uint *len, } // endif trace p+= valp->GetClen(); - + if (len[i] == (unsigned)(p - kp)) { n++; break; diff --git a/storage/connect/connect.h b/storage/connect/connect.h index 4c9cee46daf..145991a3b74 100644 --- a/storage/connect/connect.h +++ b/storage/connect/connect.h @@ -33,10 +33,10 @@ bool CntCheckDB(PGLOBAL g, PHC handler, const char *pathname); PTDB CntGetTDB(PGLOBAL g, const char *name, MODE xmod, PHC); bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE, char *, char *, bool, PHC); bool CntRewindTable(PGLOBAL g, PTDB tdbp); -int CntCloseTable(PGLOBAL g, PTDB tdbp); +int CntCloseTable(PGLOBAL g, PTDB tdbp, bool nox, bool abort); int CntIndexInit(PGLOBAL g, PTDB tdbp, int id); RCODE CntReadNext(PGLOBAL g, PTDB tdbp); -RCODE CntIndexRead(PGLOBAL g, PTDB, OPVAL op, const void *k, int n); +RCODE CntIndexRead(PGLOBAL g, PTDB, OPVAL op, const void *k, int n, bool mrr); RCODE CntWriteRow(PGLOBAL g, PTDB tdbp); RCODE CntUpdateRow(PGLOBAL g, PTDB tdbp); RCODE CntDeleteRow(PGLOBAL g, PTDB tdbp, bool all); @@ -58,9 +58,9 @@ class DOXDEF: public DOSDEF { 
/***********************************************************************/ class TDBDOX: public TDBDOS { friend int MakeIndex(PGLOBAL, PTDB, PIXDEF); - friend int CntCloseTable(PGLOBAL, PTDB); + friend int CntCloseTable(PGLOBAL, PTDB, bool, bool); friend int CntIndexInit(PGLOBAL, PTDB, int); - friend RCODE CntIndexRead(PGLOBAL, PTDB, OPVAL, const void*, int); + friend RCODE CntIndexRead(PGLOBAL, PTDB, OPVAL, const void*, int, bool); friend RCODE CntDeleteRow(PGLOBAL, PTDB, bool); friend int CntIndexRange(PGLOBAL, PTDB, const uchar**, uint*, bool*, key_part_map*); diff --git a/storage/connect/filamap.cpp b/storage/connect/filamap.cpp index 1723ee4ac27..b5958c1a854 100644 --- a/storage/connect/filamap.cpp +++ b/storage/connect/filamap.cpp @@ -1,11 +1,11 @@ /*********** File AM Map C++ Program Source Code File (.CPP) ***********/ /* PROGRAM NAME: FILAMAP */ /* ------------- */ -/* Version 1.4 */ +/* Version 1.6 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 2005-2013 */ +/* (C) Copyright to the author Olivier BERTRAND 2005-2014 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -48,6 +48,11 @@ extern "C" int trace; +/***********************************************************************/ +/* Routine called externally by MAPFAM MakeDeletedFile function. */ +/***********************************************************************/ +PARRAY MakeValueArray(PGLOBAL g, PPARM pp); + /* --------------------------- Class MAPFAM -------------------------- */ /***********************************************************************/ @@ -89,7 +94,7 @@ int MAPFAM::GetFileLength(PGLOBAL g) { int len; - len = (To_Fb) ? To_Fb->Length : TXTFAM::GetFileLength(g); + len = (To_Fb) ? 
To_Fb->Length : TXTFAM::GetFileLength(g); if (trace) htrc("Mapped file length=%d\n", len); @@ -129,9 +134,9 @@ bool MAPFAM::OpenTableFile(PGLOBAL g) && fp->Count && fp->Mode == mode) break; - if (trace) - htrc("Mapping file, fp=%p\n", fp); - +#ifdef DEBTRACE + htrc("Mapping file, fp=%p\n", fp); +#endif } else fp = NULL; @@ -322,8 +327,26 @@ int MAPFAM::ReadBuffer(PGLOBAL g) /*******************************************************************/ /* Record file position in case of UPDATE or DELETE. */ /*******************************************************************/ - Fpos = Mempos; - CurBlk = (int)Rows++; + int rc; + + next: + Fpos = Mempos; + CurBlk = (int)Rows++; + + /*******************************************************************/ + /* Check whether optimization on ROWID */ + /* can be done, as well as for join as for local filtering. */ + /*******************************************************************/ + switch (Tdbp->TestBlock(g)) { + case RC_EF: + return RC_EF; + case RC_NF: + // Skip this record + if ((rc = SkipRecord(g, FALSE)) != RC_OK) + return rc; + + goto next; + } // endswitch rc } else Placed = false; @@ -380,23 +403,30 @@ int MAPFAM::DeleteRecords(PGLOBAL g, int irc) } // endif irc - if (Tpos == Spos) + if (Tpos == Spos) { /*******************************************************************/ - /* First line to delete. Move of eventual preceding lines is */ + /* First line to delete. Move of eventual preceeding lines is */ /* not required here, just setting of future Spos and Tpos. */ /*******************************************************************/ - Tpos = Fpos; // Spos is set below - else if ((n = Fpos - Spos) > 0) { - /*******************************************************************/ - /* Non consecutive line to delete. Move intermediate lines. 
*/ - /*******************************************************************/ + Tpos = Spos = Fpos; + Indxd = Tdbp->GetKindex() != NULL; + } // endif Tpos + + if (Indxd) { + // Moving will be done later, must be done in sequential order + (void)AddListValue(g, TYPE_PCHAR, Fpos, &To_Pos); + (void)AddListValue(g, TYPE_PCHAR, Mempos, &To_Sos); + } else if ((n = Fpos - Spos) > 0) { + /*****************************************************************/ + /* Non consecutive line to delete. Move intermediate lines. */ + /*****************************************************************/ memmove(Tpos, Spos, n); Tpos += n; if (trace) htrc("move %d bytes\n", n); - } // endif n + } // endif n if (irc == RC_OK) { Spos = Mempos; // New start position @@ -407,6 +437,10 @@ int MAPFAM::DeleteRecords(PGLOBAL g, int irc) } else if (To_Fb) { // Can be NULL for deleted files /*******************************************************************/ /* Last call after EOF has been reached. */ + /*******************************************************************/ + Abort = (Indxd && MakeDeletedFile(g)); + + /*******************************************************************/ /* We must firstly Unmap the view and use the saved file handle */ /* to put an EOF at the end of the copied part of the file. */ /*******************************************************************/ @@ -415,40 +449,46 @@ int MAPFAM::DeleteRecords(PGLOBAL g, int irc) CloseMemMap(fp->Memory, (size_t)fp->Length); fp->Count = 0; // Avoid doing it twice - /*******************************************************************/ - /* Remove extra records. */ - /*******************************************************************/ - n = Tpos - Memory; + if (!Abort) { + /*****************************************************************/ + /* Remove extra records. 
*/ + /*****************************************************************/ + n = Tpos - Memory; #if defined(WIN32) - DWORD drc = SetFilePointer(fp->Handle, n, NULL, FILE_BEGIN); + DWORD drc = SetFilePointer(fp->Handle, n, NULL, FILE_BEGIN); - if (drc == 0xFFFFFFFF) { - sprintf(g->Message, MSG(FUNCTION_ERROR), - "SetFilePointer", GetLastError()); - CloseHandle(fp->Handle); - return RC_FX; - } // endif + if (drc == 0xFFFFFFFF) { + sprintf(g->Message, MSG(FUNCTION_ERROR), + "SetFilePointer", GetLastError()); + CloseHandle(fp->Handle); + return RC_FX; + } // endif - if (trace) - htrc("done, Tpos=%p newsize=%d drc=%d\n", Tpos, n, drc); + if (trace) + htrc("done, Tpos=%p newsize=%d drc=%d\n", Tpos, n, drc); - if (!SetEndOfFile(fp->Handle)) { - sprintf(g->Message, MSG(FUNCTION_ERROR), - "SetEndOfFile", GetLastError()); - CloseHandle(fp->Handle); - return RC_FX; - } // endif + if (!SetEndOfFile(fp->Handle)) { + sprintf(g->Message, MSG(FUNCTION_ERROR), + "SetEndOfFile", GetLastError()); + CloseHandle(fp->Handle); + return RC_FX; + } // endif - CloseHandle(fp->Handle); #else // UNIX - if (ftruncate(fp->Handle, (off_t)n)) { - sprintf(g->Message, MSG(TRUNCATE_ERROR), strerror(errno)); - close(fp->Handle); - return RC_FX; - } // endif + if (ftruncate(fp->Handle, (off_t)n)) { + sprintf(g->Message, MSG(TRUNCATE_ERROR), strerror(errno)); + close(fp->Handle); + return RC_FX; + } // endif - close(fp->Handle); +#endif // UNIX + } // endif Abort + +#if defined(WIN32) + CloseHandle(fp->Handle); +#else // UNIX + close(fp->Handle); #endif // UNIX } // endif irc @@ -456,12 +496,61 @@ int MAPFAM::DeleteRecords(PGLOBAL g, int irc) } // end of DeleteRecords /***********************************************************************/ +/* MakeDeletedFile. When deleting using indexing, the issue is that */ +/* record are not necessarily deleted in sequential order. Moving */ +/* intermediate lines cannot be done while deleting them. 
*/ +/* What we do here is to reorder the deleted records and move the */ +/* intermediate files from the ordered deleted record positions. */ +/***********************************************************************/ +bool MAPFAM::MakeDeletedFile(PGLOBAL g) + { + int *ix, i, n; + + /*********************************************************************/ + /* Make and order the arrays from the saved values. */ + /*********************************************************************/ + if (!(Posar = MakeValueArray(g, To_Pos))) { + strcpy(g->Message, "Position array is null"); + goto err; + } else if (!(Sosar = MakeValueArray(g, To_Sos))) { + strcpy(g->Message, "Start position array is null"); + goto err; + } else if (!(ix = (int*)Posar->GetSortIndex(g))) { + strcpy(g->Message, "Error getting array sort index"); + goto err; + } // endif's + + for (i = 0; i < Posar->GetNval(); i++) { + Fpos = Posar->GetStringValue(ix[i]); + + if (!i) { + Tpos = Fpos; + } else if ((n = Fpos - Spos) >= 0) { + // Move all not deleted lines preceding this one + memmove(Tpos, Spos, n); + Tpos += n; + } // endif n + + // New start position + Spos = Sosar->GetStringValue(ix[i]); + } // endfor i + + return false; + +err: + if (trace) + htrc("%s\n", g->Message); + + return true; + } // end of MakeDeletedFile + +/***********************************************************************/ /* Table file close routine for MAP access method. 
*/ /***********************************************************************/ -void MAPFAM::CloseTableFile(PGLOBAL g) +void MAPFAM::CloseTableFile(PGLOBAL g, bool abort) { PlugCloseFile(g, To_Fb); - To_Fb = NULL; // To get correct file size in Cardinality + To_Fb = NULL; // To get correct file size in Cardinality if (trace) htrc("MAP Close: closing %s count=%d\n", @@ -488,7 +577,7 @@ MBKFAM::MBKFAM(PDOSDEF tdp) : MAPFAM(tdp) Block = tdp->GetBlock(); Last = tdp->GetLast(); Nrec = tdp->GetElemt(); - BlkPos = NULL; + BlkPos = tdp->GetTo_Pos(); CurNum = Nrec; } // end of MBKFAM standard constructor @@ -508,9 +597,7 @@ void MBKFAM::Reset(void) /***********************************************************************/ int MBKFAM::Cardinality(PGLOBAL g) { - // Should not be called in this version - return (g) ? -1 : 0; -//return (g) ? (int)((Block - 1) * Nrec + Last) : 1; + return (g) ? (int)((Block - 1) * Nrec + Last) : 1; } // end of Cardinality /***********************************************************************/ @@ -534,8 +621,49 @@ int MBKFAM::GetRowID(void) /***********************************************************************/ int MBKFAM::ReadBuffer(PGLOBAL g) { - strcpy(g->Message, "This AM cannot be used in this version"); - return RC_FX; + int len; + + /*********************************************************************/ + /* Sequential block reading when Placed is not true. */ + /*********************************************************************/ + if (Placed) { + Placed = false; + } else if (Mempos >= Top) { // Are we at the end of the memory + return RC_EF; + } else if (++CurNum < Nrec) { + Fpos = Mempos; + } else { + /*******************************************************************/ + /* New block. 
*/ + /*******************************************************************/ + CurNum = 0; + + next: + if (++CurBlk >= Block) + return RC_EF; + + /*******************************************************************/ + /* Before reading a new block, check whether block optimization */ + /* can be done, as well as for join as for local filtering. */ + /*******************************************************************/ + switch (Tdbp->TestBlock(g)) { + case RC_EF: + return RC_EF; + case RC_NF: + goto next; + } // endswitch rc + + Fpos = Mempos = Memory + BlkPos[CurBlk]; + } // endif's + + // Immediately calculate next position (Used by DeleteDB) + while (*Mempos++ != '\n') ; // What about Unix ??? + + // Set caller line buffer + len = (Mempos - Fpos) - Ending; + memcpy(Tdbp->GetLine(), Fpos, len); + Tdbp->GetLine()[len] = '\0'; + return RC_OK; } // end of ReadBuffer /***********************************************************************/ @@ -618,17 +746,29 @@ int MPXFAM::ReadBuffer(PGLOBAL g) Placed = false; } else if (Mempos >= Top) { // Are we at the end of the memory return RC_EF; - } else if (++CurNum < Nrec) { + } else if (++CurNum < Nrec) { Fpos = Mempos; - } else { + } else { /*******************************************************************/ /* New block. */ /*******************************************************************/ - CurNum = 0; + CurNum = 0; + + next: + if (++CurBlk >= Block) + return RC_EF; + + /*******************************************************************/ + /* Before reading a new block, check whether block optimization */ + /* can be done, as well as for join as for local filtering. 
*/ + /*******************************************************************/ + switch (Tdbp->TestBlock(g)) { + case RC_EF: + return RC_EF; + case RC_NF: + goto next; + } // endswitch rc - if (++CurBlk >= Block) - return RC_EF; - Fpos = Mempos = Headlen + Memory + CurBlk * Blksize; } // endif's diff --git a/storage/connect/filamap.h b/storage/connect/filamap.h index 7f71b90a18f..7d3203d7ff2 100644 --- a/storage/connect/filamap.h +++ b/storage/connect/filamap.h @@ -1,7 +1,7 @@ /*************** FilAMap H Declares Source Code File (.H) **************/ -/* Name: FILAMAP.H Version 1.2 */ +/* Name: FILAMAP.H Version 1.3 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2005-2012 */ +/* (C) Copyright to the author Olivier BERTRAND 2005-2014 */ /* */ /* This file contains the MAP file access method classes declares. */ /***********************************************************************/ @@ -33,23 +33,26 @@ class DllExport MAPFAM : public TXTFAM { virtual void Reset(void); virtual int GetFileLength(PGLOBAL g); virtual int Cardinality(PGLOBAL g) {return (g) ? -1 : 0;} + virtual int MaxBlkSize(PGLOBAL g, int s) {return s;} virtual int GetRowID(void); virtual bool RecordPos(PGLOBAL g); - virtual bool SetPos(PGLOBAL g, int recpos); + virtual bool SetPos(PGLOBAL g, int recpos); virtual int SkipRecord(PGLOBAL g, bool header); virtual bool OpenTableFile(PGLOBAL g); virtual bool DeferReading(void) {return false;} virtual int ReadBuffer(PGLOBAL g); virtual int WriteBuffer(PGLOBAL g); - virtual int DeleteRecords(PGLOBAL g, int irc); - virtual void CloseTableFile(PGLOBAL g); + virtual int DeleteRecords(PGLOBAL g, int irc); + virtual void CloseTableFile(PGLOBAL g, bool abort); virtual void Rewind(void); protected: + bool MakeDeletedFile(PGLOBAL g); + // Members char *Memory; // Pointer on file mapping view. 
char *Mempos; // Position of next data to read - char *Fpos; // Position of last read record + char *Fpos; // Position of last read record char *Tpos; // Target Position for delete move char *Spos; // Start position for delete move char *Top; // Mark end of file mapping view @@ -71,6 +74,8 @@ class DllExport MBKFAM : public MAPFAM { // Methods virtual void Reset(void); virtual int Cardinality(PGLOBAL g); + virtual int MaxBlkSize(PGLOBAL g, int s) + {return TXTFAM::MaxBlkSize(g, s);} virtual int GetRowID(void); virtual int SkipRecord(PGLOBAL g, bool header); virtual int ReadBuffer(PGLOBAL g); @@ -96,7 +101,9 @@ class DllExport MPXFAM : public MBKFAM { // Methods virtual int Cardinality(PGLOBAL g) {return TXTFAM::Cardinality(g);} - virtual bool SetPos(PGLOBAL g, int recpos); + virtual int MaxBlkSize(PGLOBAL g, int s) + {return TXTFAM::MaxBlkSize(g, s);} + virtual bool SetPos(PGLOBAL g, int recpos); virtual bool DeferReading(void) {return false;} virtual int ReadBuffer(PGLOBAL g); virtual int WriteBuffer(PGLOBAL g); diff --git a/storage/connect/filamdbf.cpp b/storage/connect/filamdbf.cpp index 7ca98eeff55..5d01ee06df0 100644 --- a/storage/connect/filamdbf.cpp +++ b/storage/connect/filamdbf.cpp @@ -1,11 +1,11 @@ /*********** File AM Dbf C++ Program Source Code File (.CPP) ****************/ /* PROGRAM NAME: FILAMDBF */ /* ------------- */ -/* Version 1.6 */ +/* Version 1.7 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 2005-2013 */ +/* (C) Copyright to the author Olivier BERTRAND 2005-2014 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -668,12 +668,9 @@ void DBFFAM::ResetBuffer(PGLOBAL g) /*********************************************************************/ /* If access is random, performances can be much better when the */ /* reads are done on only one row, except for small tables that can */ - /* be entirely read in one block. 
If the index is just used as a */ - /* bitmap filter, as for Update or delete, reading will be */ - /* sequential and we better keep block reading. */ + /* be entirely read in one block. */ /*********************************************************************/ - if (Tdbp->GetKindex() && Tdbp->GetMode() == MODE_READ && - ReadBlks != 1) { + if (Tdbp->GetKindex() && ReadBlks != 1) { Nrec = 1; // Better for random access Rbuf = 0; Blksize = Lrecl; @@ -763,12 +760,16 @@ int DBFFAM::DeleteRecords(PGLOBAL g, int irc) // T_Stream is the temporary stream or the table file stream itself if (!T_Stream) if (UseTemp) { - if (OpenTempFile(g)) + if ((Indxd = Tdbp->GetKindex() != NULL)) { + strcpy(g->Message, "DBF indexed udate using temp file NIY"); + return RC_FX; + } else if (OpenTempFile(g)) return RC_FX; if (CopyHeader(g)) // For DBF tables return RC_FX; +// Indxd = Tdbp->GetKindex() != NULL; } else T_Stream = Stream; @@ -791,11 +792,13 @@ void DBFFAM::Rewind(void) /***********************************************************************/ /* Table file close routine for DBF access method. */ /***********************************************************************/ -void DBFFAM::CloseTableFile(PGLOBAL g) +void DBFFAM::CloseTableFile(PGLOBAL g, bool abort) { int rc = RC_OK, wrc = RC_OK; MODE mode = Tdbp->GetMode(); + Abort = abort; + // Closing is True if last Write was in error if (mode == MODE_INSERT && CurNum && !Closing) { // Some more inserted lines remain to be written @@ -810,17 +813,17 @@ void DBFFAM::CloseTableFile(PGLOBAL g) } // endif Modif if (UseTemp && T_Stream && wrc == RC_OK) { - // Copy any remaining lines - bool b; - - Fpos = Tdbp->Cardinality(g); - - if ((rc = MoveIntermediateLines(g, &b)) == RC_OK) { - // Delete the old file and rename the new temp file. 
- RenameTempFile(g); - goto fin; - } // endif rc + if (!Abort) { + // Copy any remaining lines + bool b; + + Fpos = Tdbp->Cardinality(g); + Abort = MoveIntermediateLines(g, &b) != RC_OK; + } // endif Abort + // Delete the old file and rename the new temp file. + RenameTempFile(g); + goto fin; } // endif UseTemp } // endif's mode diff --git a/storage/connect/filamdbf.h b/storage/connect/filamdbf.h index b85b9fc47fe..0345c0338e8 100644 --- a/storage/connect/filamdbf.h +++ b/storage/connect/filamdbf.h @@ -67,7 +67,7 @@ class DllExport DBFFAM : public FIXFAM, public DBFBASE { virtual void ResetBuffer(PGLOBAL g); virtual int ReadBuffer(PGLOBAL g); virtual int DeleteRecords(PGLOBAL g, int irc); - virtual void CloseTableFile(PGLOBAL g); + virtual void CloseTableFile(PGLOBAL g, bool abort); virtual void Rewind(void); protected: diff --git a/storage/connect/filamfix.cpp b/storage/connect/filamfix.cpp index 9338ae322db..d6c3906dac3 100644 --- a/storage/connect/filamfix.cpp +++ b/storage/connect/filamfix.cpp @@ -1,7 +1,7 @@ /*********** File AM Fix C++ Program Source Code File (.CPP) ***********/ /* PROGRAM NAME: FILAMFIX */ /* ------------- */ -/* Version 1.5 */ +/* Version 1.6 */ /* */ /* COPYRIGHT: */ /* ---------- */ @@ -55,6 +55,11 @@ extern "C" int trace; extern int num_read, num_there, num_eq[2]; // Statistics +/***********************************************************************/ +/* Routine called externally by BGXFAM MakeDeletedFile function. */ +/***********************************************************************/ +PARRAY MakeValueArray(PGLOBAL g, PPARM pp); + /* --------------------------- Class FIXFAM -------------------------- */ /***********************************************************************/ @@ -80,6 +85,27 @@ FIXFAM::FIXFAM(PFIXFAM txfp) : BLKFAM(txfp) } // end of FIXFAM copy constructor /***********************************************************************/ +/* SetPos: Replace the table at the specified position. 
*/ +/***********************************************************************/ +bool FIXFAM::SetPos(PGLOBAL g, int pos) + { + if (pos < 0) { + strcpy(g->Message, MSG(INV_REC_POS)); + return true; + } // endif recpos + + CurBlk = pos / Nrec; + CurNum = pos % Nrec; +#if defined(_DEBUG) + num_eq[(CurBlk == OldBlk) ? 1 : 0]++; +#endif + + // Indicate the table position was externally set + Placed = true; + return false; + } // end of SetPos + +/***********************************************************************/ /* Allocate the block buffer for the table. */ /***********************************************************************/ bool FIXFAM::AllocateBuffer(PGLOBAL g) @@ -128,12 +154,9 @@ void FIXFAM::ResetBuffer(PGLOBAL g) /*********************************************************************/ /* If access is random, performances can be much better when the */ /* reads are done on only one row, except for small tables that can */ - /* be entirely read in one block. If the index is just used as a */ - /* bitmap filter as for Update or Delete reading will be sequential */ - /* and we better keep block reading. */ + /* be entirely read in one block. */ /*********************************************************************/ - if (Tdbp->GetMode() == MODE_READ && ReadBlks != 1 && !Padded && - Tdbp->GetKindex() /*&& Tdbp->GetKindex()->IsRandom()*/) { + if (Tdbp->GetKindex() && ReadBlks != 1 && !Padded) { Nrec = 1; // Better for random access Rbuf = 0; Blksize = Lrecl; @@ -169,9 +192,20 @@ int FIXFAM::ReadBuffer(PGLOBAL g) CurNum = 0; Tdbp->SetLine(To_Buf); + next: if (++CurBlk >= Block) return RC_EF; + /*****************************************************************/ + /* Before reading a new block, check whether block indexing */ + /* can be done, as well as for join as for local filtering. 
*/ + /*****************************************************************/ + switch (Tdbp->TestBlock(g)) { + case RC_EF: + return RC_EF; + case RC_NF: + goto next; + } // endswitch rc } // endif's if (OldBlk == CurBlk) { @@ -304,18 +338,22 @@ int FIXFAM::WriteBuffer(PGLOBAL g) } else { // Mode == MODE_UPDATE // T_Stream is the temporary stream or the table file stream itself - if (!T_Stream) - { - if (UseTemp /*&& Tdbp->GetMode() == MODE_UPDATE*/) { - if (OpenTempFile(g)) + if (!T_Stream) { + if (UseTemp) { + if ((Indxd = Tdbp->GetKindex() != NULL)) { + strcpy(g->Message, "FIX indexed udate using temp file NIY"); return RC_FX; - - if (CopyHeader(g)) // For DBF tables + } else if (OpenTempFile(g)) + return RC_FX; + else if (CopyHeader(g)) // For DBF tables return RC_FX; +// Indxd = Tdbp->GetKindex() != NULL; } else T_Stream = Stream; - } + + } // endif T_Stream + Modif++; // Modified line in Update mode } // endif Mode @@ -375,12 +413,17 @@ int FIXFAM::DeleteRecords(PGLOBAL g, int irc) Spos = Tpos = Fpos; } // endif UseTemp + Indxd = Tdbp->GetKindex() != NULL; } // endif Tpos == Spos /*********************************************************************/ /* Move any intermediate lines. 
*/ /*********************************************************************/ - if (MoveIntermediateLines(g, &moved)) + if (Indxd) { + // Moving will be done later, must be done in sequential order + (void)AddListValue(g, TYPE_INT, &Fpos, &To_Pos); + moved = false; + } else if (MoveIntermediateLines(g, &moved)) return RC_FX; if (irc == RC_OK) { @@ -413,6 +456,9 @@ int FIXFAM::DeleteRecords(PGLOBAL g, int irc) return RC_FX; } else { + if (Indxd && (Abort = MakeDeletedFile(g))) + return RC_FX; + /*****************************************************************/ /* Because the chsize functionality is only accessible with a */ /* system call we must close the file and reopen it with the */ @@ -514,13 +560,68 @@ bool FIXFAM::MoveIntermediateLines(PGLOBAL g, bool *b) } // end of MoveIntermediate Lines /***********************************************************************/ +/* MakeDeletedFile. When deleting using indexing, the issue is that */ +/* record are not necessarily deleted in sequential order. Moving */ +/* intermediate lines cannot be done while deleing them because */ +/* this can cause extra wrong records to be included in the new file. */ +/* What we do here is to reorder the deleted record and make the new */ +/* deleted file from the ordered deleted records. */ +/***********************************************************************/ +bool FIXFAM::MakeDeletedFile(PGLOBAL g) + { + const char *crlf = "\n", *mode = UseTemp ? "rb" : "r+b"; + int *ix, i; + bool moved; + + /*********************************************************************/ + /* Open the temporary file, Spos is at the beginning of file. 
*/ + /*********************************************************************/ + if (!(Posar = MakeValueArray(g, To_Pos))) { + strcpy(g->Message, "Position array is null"); + goto err; + } else if (!(ix = (int*)Posar->GetSortIndex(g))) { + strcpy(g->Message, "Error getting array sort index"); + goto err; + } // endif's + + Spos = 0; + + for (i = 0; i < Posar->GetNval(); i++) { + Fpos = Posar->GetIntValue(ix[i]); + + if (i || UseTemp) { + // Copy all not updated lines preceding this one + if (MoveIntermediateLines(g, &moved)) + goto err; + + } else + Tpos = Fpos; + + // New start position + Spos = Fpos + 1; + } // endfor i + + if (!PlugCloseFile(g, To_Fbt) && !PlugCloseFile(g, To_Fb)) + return false; + +err: + if (trace) + htrc("%s\n", g->Message); + + PlugCloseFile(g, To_Fbt); + return true; + } // end of MakeDeletedFile + +/***********************************************************************/ /* Table file close routine for FIX access method. */ /***********************************************************************/ -void FIXFAM::CloseTableFile(PGLOBAL g) +void FIXFAM::CloseTableFile(PGLOBAL g, bool abort) { int rc = RC_OK, wrc = RC_OK; MODE mode = Tdbp->GetMode(); + Abort = abort; + // Closing is True if last Write was in error if (mode == MODE_INSERT && CurNum && !Closing) { // Some more inserted lines remain to be written @@ -535,17 +636,18 @@ void FIXFAM::CloseTableFile(PGLOBAL g) } // endif Modif if (UseTemp && T_Stream && wrc == RC_OK) { - // Copy any remaining lines - bool b; - - Fpos = Tdbp->Cardinality(g); - - if ((rc = MoveIntermediateLines(g, &b)) == RC_OK) { - // Delete the old file and rename the new temp file. - RenameTempFile(g); - goto fin; - } // endif rc - + if (!Abort) { + // Copy any remaining lines + bool b; + + // Note: Indxd is not implemented yet + Fpos = Tdbp->Cardinality(g); + Abort = MoveIntermediateLines(g, &b) != RC_OK; + } // endif Abort + + // Delete the old file and rename the new temp file. 
+ RenameTempFile(g); + goto fin; } // endif UseTemp } // endif's mode @@ -1013,9 +1115,21 @@ int BGXFAM::ReadBuffer(PGLOBAL g) CurNum = 0; Tdbp->SetLine(To_Buf); + next: if (++CurBlk >= Block) return RC_EF; + /*****************************************************************/ + /* Before reading a new block, check whether block optimization */ + /* can be done, as well as for join as for local filtering. */ + /*****************************************************************/ + switch (Tdbp->TestBlock(g)) { + case RC_EF: + return RC_EF; + case RC_NF: + goto next; + } // endswitch rc + } // endif's if (OldBlk == CurBlk) { @@ -1119,7 +1233,10 @@ int BGXFAM::WriteBuffer(PGLOBAL g) if (Tfile == INVALID_HANDLE_VALUE) { if (UseTemp /*&& Tdbp->GetMode() == MODE_UPDATE*/) { - if (OpenTempFile(g)) + if ((Indxd = Tdbp->GetKindex() != NULL)) { + strcpy(g->Message, "FIX indexed udate using temp file NIY"); + return RC_FX; + } else if (OpenTempFile(g)) return RC_FX; } else @@ -1186,15 +1303,19 @@ int BGXFAM::DeleteRecords(PGLOBAL g, int irc) Spos = Tpos = Fpos; } // endif UseTemp + Indxd = Tdbp->GetKindex() != NULL; } // endif Tpos == Spos /*********************************************************************/ /* Move any intermediate lines. */ /*********************************************************************/ - if (MoveIntermediateLines(g, &moved)) + if (Indxd) + // Moving will be done later, must be done in sequential order + (void)AddListValue(g, TYPE_INT, &Fpos, &To_Pos); + else if (MoveIntermediateLines(g, &moved)) return RC_FX; - if (irc == RC_OK) { + if (irc == RC_OK && !Indxd) { if (trace) assert(Spos == Fpos); @@ -1210,14 +1331,10 @@ int BGXFAM::DeleteRecords(PGLOBAL g, int irc) if (trace > 1) htrc("after: Tpos=%d Spos=%d\n", Tpos, Spos); - } else { + } else if (irc != RC_OK) { /*******************************************************************/ /* Last call after EOF has been reached. 
*/ /*******************************************************************/ - char filename[_MAX_PATH]; - - PlugSetPath(filename, To_File, Tdbp->GetPath()); - if (UseTemp) { /*****************************************************************/ /* Ok, now delete old file and rename new temp file. */ @@ -1226,6 +1343,9 @@ int BGXFAM::DeleteRecords(PGLOBAL g, int irc) return RC_FX; } else { + if (Indxd && (Abort = MakeDeletedFile(g))) + return RC_FX; + /*****************************************************************/ /* Remove extra records. */ /*****************************************************************/ @@ -1350,13 +1470,68 @@ bool BGXFAM::MoveIntermediateLines(PGLOBAL g, bool *b) } // end of MoveIntermediateLines /***********************************************************************/ +/* MakeDeletedFile. When deleting using indexing, the issue is that */ +/* record are not necessarily deleted in sequential order. Moving */ +/* intermediate lines cannot be done while deleing them because */ +/* this can cause extra wrong records to be included in the new file. */ +/* What we do here is to reorder the deleted record and make the new */ +/* deleted file from the ordered deleted records. */ +/***********************************************************************/ +bool BGXFAM::MakeDeletedFile(PGLOBAL g) + { + const char *crlf = "\n", *mode = UseTemp ? "rb" : "r+b"; + int *ix, i; + bool moved; + + /*********************************************************************/ + /* Open the temporary file, Spos is at the beginning of file. 
*/ + /*********************************************************************/ + if (!(Posar = MakeValueArray(g, To_Pos))) { + strcpy(g->Message, "Position array is null"); + goto err; + } else if (!(ix = (int*)Posar->GetSortIndex(g))) { + strcpy(g->Message, "Error getting array sort index"); + goto err; + } // endif's + + Spos = 0; + + for (i = 0; i < Posar->GetNval(); i++) { + Fpos = Posar->GetIntValue(ix[i]); + + if (i || UseTemp) { + // Copy all not updated lines preceding this one + if (MoveIntermediateLines(g, &moved)) + goto err; + + } else + Tpos = Fpos; + + // New start position + Spos = Fpos + 1; + } // endfor i + + if (!PlugCloseFile(g, To_Fbt)) + return false; + +err: + if (trace) + htrc("%s\n", g->Message); + + PlugCloseFile(g, To_Fbt); + return true; + } // end of MakeDeletedFile + +/***********************************************************************/ /* Data Base close routine for BIGFIX access method. */ /***********************************************************************/ -void BGXFAM::CloseTableFile(PGLOBAL g) +void BGXFAM::CloseTableFile(PGLOBAL g, bool abort) { int rc = RC_OK, wrc = RC_OK; MODE mode = Tdbp->GetMode(); + Abort = abort; + // Closing is True if last Write was in error if (mode == MODE_INSERT && CurNum && !Closing) { // Some more inserted lines remain to be written @@ -1370,17 +1545,18 @@ void BGXFAM::CloseTableFile(PGLOBAL g) } // endif Modif if (UseTemp && Tfile && wrc == RC_OK) { - // Copy any remaining lines - bool b; - - Fpos = Tdbp->Cardinality(g); - - if ((rc = MoveIntermediateLines(g, &b)) == RC_OK) { - // Delete the old file and rename the new temp file. - RenameTempFile(g); - goto fin; - } // endif rc - + if (!Abort) { + // Copy any remaining lines + bool b; + + // Indxd is not implemented yet + Fpos = Tdbp->Cardinality(g); + Abort = MoveIntermediateLines(g, &b) != RC_OK; + } // endif Abort + + // Delete the old file and rename the new temp file. 
+ RenameTempFile(g); + goto fin; } // endif UseTemp } // endif's mode diff --git a/storage/connect/filamfix.h b/storage/connect/filamfix.h index 6f9e6ef3b45..a29bfbbeb48 100644 --- a/storage/connect/filamfix.h +++ b/storage/connect/filamfix.h @@ -1,7 +1,7 @@ /************** FilAMFix H Declares Source Code File (.H) **************/ -/* Name: FILAMFIX.H Version 1.2 */ +/* Name: FILAMFIX.H Version 1.3 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2005 - 2012 */ +/* (C) Copyright to the author Olivier BERTRAND 2005 - 2014 */ /* */ /* This file contains the FIX file access method classes declares. */ /***********************************************************************/ @@ -25,22 +25,26 @@ class DllExport FIXFAM : public BLKFAM { FIXFAM(PFIXFAM txfp); // Implementation - virtual AMT GetAmType(void) {return TYPE_AM_FIX;} - virtual PTXF Duplicate(PGLOBAL g) - {return (PTXF)new(g) FIXFAM(this);} + virtual AMT GetAmType(void) {return TYPE_AM_FIX;} + virtual PTXF Duplicate(PGLOBAL g) + {return (PTXF)new(g) FIXFAM(this);} // Methods virtual int Cardinality(PGLOBAL g) {return TXTFAM::Cardinality(g);} + virtual int MaxBlkSize(PGLOBAL g, int s) + {return TXTFAM::MaxBlkSize(g, s);} + virtual bool SetPos(PGLOBAL g, int recpos); virtual bool AllocateBuffer(PGLOBAL g); virtual void ResetBuffer(PGLOBAL g); virtual int ReadBuffer(PGLOBAL g); virtual int WriteBuffer(PGLOBAL g); virtual int DeleteRecords(PGLOBAL g, int irc); - virtual void CloseTableFile(PGLOBAL g); + virtual void CloseTableFile(PGLOBAL g, bool abort); protected: virtual bool CopyHeader(PGLOBAL g) {return false;} virtual bool MoveIntermediateLines(PGLOBAL g, bool *b); + virtual bool MakeDeletedFile(PGLOBAL g); // No additional members }; // end of class FIXFAM @@ -58,25 +62,26 @@ class BGXFAM : public FIXFAM { BGXFAM(PBGXFAM txfp); // Implementation - virtual PTXF Duplicate(PGLOBAL g) - {return (PTXF)new(g) BGXFAM(this);} + virtual PTXF Duplicate(PGLOBAL g) + {return (PTXF)new(g) BGXFAM(this);} // Methods 
- virtual int Cardinality(PGLOBAL g); - virtual bool OpenTableFile(PGLOBAL g); - virtual int ReadBuffer(PGLOBAL g); - virtual int WriteBuffer(PGLOBAL g); - virtual int DeleteRecords(PGLOBAL g, int irc); - virtual void CloseTableFile(PGLOBAL g); - virtual void Rewind(void); + virtual int Cardinality(PGLOBAL g); + virtual bool OpenTableFile(PGLOBAL g); + virtual int ReadBuffer(PGLOBAL g); + virtual int WriteBuffer(PGLOBAL g); + virtual int DeleteRecords(PGLOBAL g, int irc); + virtual void CloseTableFile(PGLOBAL g, bool abort); + virtual void Rewind(void); protected: - bool BigSeek(PGLOBAL g, HANDLE h, BIGINT pos - , int org = FILE_BEGIN); - int BigRead(PGLOBAL g, HANDLE h, void *inbuf, int req); - bool BigWrite(PGLOBAL g, HANDLE h, void *inbuf, int req); virtual bool OpenTempFile(PGLOBAL g); virtual bool MoveIntermediateLines(PGLOBAL g, bool *b = NULL); + virtual bool MakeDeletedFile(PGLOBAL g); + int BigRead(PGLOBAL g, HANDLE h, void *inbuf, int req); + bool BigWrite(PGLOBAL g, HANDLE h, void *inbuf, int req); + bool BigSeek(PGLOBAL g, HANDLE h, BIGINT pos + , int org = FILE_BEGIN); // Members HANDLE Hfile; // Handle(descriptor) to big file diff --git a/storage/connect/filamtxt.cpp b/storage/connect/filamtxt.cpp index e4e9130dc86..9c0cd51458d 100644 --- a/storage/connect/filamtxt.cpp +++ b/storage/connect/filamtxt.cpp @@ -1,11 +1,11 @@ /*********** File AM Txt C++ Program Source Code File (.CPP) ***********/ /* PROGRAM NAME: FILAMTXT */ /* ------------- */ -/* Version 1.4 */ +/* Version 1.6 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 2005-2013 */ +/* (C) Copyright to the author Olivier BERTRAND 2005-2014 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -58,6 +58,11 @@ extern int num_read, num_there, num_eq[2]; // Statistics extern "C" int trace; +/***********************************************************************/ +/* Routine called externally by DOSFAM MakeUpdatedFile function. 
*/ +/***********************************************************************/ +PARRAY MakeValueArray(PGLOBAL g, PPARM pp); + /* --------------------------- Class TXTFAM -------------------------- */ /***********************************************************************/ @@ -75,6 +80,12 @@ TXTFAM::TXTFAM(PDOSDEF tdp) To_Buf = NULL; DelBuf = NULL; BlkPos = NULL; + To_Pos = NULL; + To_Sos = NULL; + To_Upd = NULL; + Posar = NULL; + Sosar = NULL; + Updar = NULL; BlkLen = 0; Buflen = 0; Dbflen = 0; @@ -94,7 +105,9 @@ TXTFAM::TXTFAM(PDOSDEF tdp) Padded = false; Eof = tdp->Eof; Ending = tdp->Ending; - CrLf = (char*)(Ending == 2 ? "\r\n" : "\n"); + Indxd = false; + Abort = false; + CrLf = (char*)(Ending == 1 ? "\n" : "\r\n"); } // end of TXTFAM standard constructor TXTFAM::TXTFAM(PTXF txfp) @@ -109,6 +122,12 @@ TXTFAM::TXTFAM(PTXF txfp) To_Buf = txfp->To_Buf; DelBuf = txfp->DelBuf; BlkPos = txfp->BlkPos; + To_Pos = txfp->To_Pos; + To_Sos = txfp->To_Sos; + To_Upd = txfp->To_Upd; + Posar = txfp->Posar; + Sosar = txfp->Sosar; + Updar = txfp->Updar; BlkLen = txfp->BlkLen; Buflen = txfp->Buflen; Dbflen = txfp->Dbflen; @@ -128,6 +147,9 @@ TXTFAM::TXTFAM(PTXF txfp) Padded = txfp->Padded; Eof = txfp->Eof; Ending = txfp->Ending; + Indxd = txfp->Indxd; + Abort = txfp->Abort; + CrLf = txfp->CrLf; } // end of TXTFAM copy constructor /***********************************************************************/ @@ -151,9 +173,9 @@ void TXTFAM::Reset(void) /***********************************************************************/ int TXTFAM::GetFileLength(PGLOBAL g) { - char filename[_MAX_PATH]; - int h; - int len; + char filename[_MAX_PATH]; + int h; + int len; PlugSetPath(filename, To_File, Tdbp->GetPath()); h= global_open(g, MSGID_OPEN_MODE_STRERROR, filename, _O_RDONLY); @@ -165,13 +187,13 @@ int TXTFAM::GetFileLength(PGLOBAL g) if (errno != ENOENT) { if (trace) htrc("%s\n", g->Message); + len = -1; - } - else - { + } else { len = 0; // File does not exist yet g->Message[0]= '\0'; - } + 
} // endif errno + } else { if ((len = _filelength(h)) < 0) sprintf(g->Message, MSG(FILELEN_ERROR), "_filelength", filename); @@ -214,7 +236,7 @@ int TXTFAM::Cardinality(PGLOBAL g) } // endif Padded if (trace) - htrc(" Computed max_K=%d Filen=%d lrecl=%d\n", + htrc(" Computed max_K=%d Filen=%d lrecl=%d\n", card, len, Lrecl); } else @@ -228,6 +250,58 @@ int TXTFAM::Cardinality(PGLOBAL g) } // end of Cardinality +/***********************************************************************/ +/* Use BlockTest to reduce the table estimated size. */ +/* Note: This function is meant only for fixed length files but is */ +/* placed here to be available to FIXFAM and MPXFAM classes. */ +/***********************************************************************/ +int TXTFAM::MaxBlkSize(PGLOBAL g, int s) + { + int rc = RC_OK, savcur = CurBlk, blm1 = Block - 1; + int size, last = s - blm1 * Nrec; + + // Roughly estimate the table size as the sum of blocks + // that can contain good rows + for (size = 0, CurBlk = 0; CurBlk < Block; CurBlk++) + if ((rc = Tdbp->TestBlock(g)) == RC_OK) + size += (CurBlk == blm1) ? last : Nrec; + else if (rc == RC_EF) + break; + + CurBlk = savcur; + return size; + } // end of MaxBlkSize + +/***********************************************************************/ +/* AddListValue: Used when doing indexed update or delete. 
*/ +/***********************************************************************/ +bool TXTFAM::AddListValue(PGLOBAL g, int type, void *val, PPARM *top) + { + PPARM pp = (PPARM)PlugSubAlloc(g, NULL, sizeof(PARM)); + + switch (type) { + case TYPE_INT: + pp->Value = PlugSubAlloc(g, NULL, sizeof(int)); + *((int*)pp->Value) = *((int*)val); + break; + case TYPE_STRING: + pp->Value = PlugSubAlloc(g, NULL, strlen((char*)val) + 1); + strcpy((char*)pp->Value, (char*)val); + break; + case TYPE_PCHAR: + pp->Value = val; + break; + default: + return true; + } // endswitch type + + pp->Type = type; + pp->Domain = 0; + pp->Next = *top; + *top = pp; + return false; + } // end of AddListValue + /* --------------------------- Class DOSFAM -------------------------- */ /***********************************************************************/ @@ -255,6 +329,17 @@ DOSFAM::DOSFAM(PDOSFAM tdfp) : TXTFAM(tdfp) Bin = tdfp->Bin; } // end of DOSFAM copy constructor +DOSFAM::DOSFAM(PBLKFAM tdfp, PDOSDEF tdp) : TXTFAM(tdp) + { + Tdbp = tdfp->Tdbp; + To_Fb = tdfp->To_Fb; + To_Fbt = tdfp->To_Fbt; + Stream = tdfp->Stream; + T_Stream = tdfp->T_Stream; + UseTemp = tdfp->UseTemp; + Bin = tdfp->Bin; + } // end of DOSFAM constructor from BLKFAM + /***********************************************************************/ /* Reset: reset position values at the beginning of file. */ /***********************************************************************/ @@ -295,6 +380,15 @@ int DOSFAM::Cardinality(PGLOBAL g) } // end of Cardinality /***********************************************************************/ +/* Use BlockTest to reduce the table estimated size. */ +/* Note: This function is not really implemented yet. 
*/ +/***********************************************************************/ +int DOSFAM::MaxBlkSize(PGLOBAL g, int s) + { + return s; + } // end of MaxBlkSize + +/***********************************************************************/ /* OpenTableFile: Open a DOS/UNIX table file using C standard I/Os. */ /***********************************************************************/ bool DOSFAM::OpenTableFile(PGLOBAL g) @@ -304,8 +398,8 @@ bool DOSFAM::OpenTableFile(PGLOBAL g) MODE mode = Tdbp->Mode; PDBUSER dbuserp = PlgGetUser(g); - // This is required when using Unix files under Windows - Bin = (Ending == 1); + // This is required when using Unix files under Windows and vice versa + Bin = (Ending != CRLF); switch (mode) { case MODE_READ: @@ -381,7 +475,7 @@ bool DOSFAM::AllocateBuffer(PGLOBAL g) MODE mode = Tdbp->Mode; // Lrecl does not include line ending - Buflen = Lrecl + Ending + ((Bin) ? 1 : 0); + Buflen = Lrecl + Ending + ((Bin) ? 1 : 0) + 1; if (trace) htrc("SubAllocating a buffer of %d bytes\n", Buflen); @@ -509,19 +603,35 @@ int DOSFAM::ReadBuffer(PGLOBAL g) if (trace > 1) htrc("ReadBuffer: Tdbp=%p To_Line=%p Placed=%d\n", - Tdbp, Tdbp->To_Line, Placed); + Tdbp, Tdbp->To_Line, Placed); if (!Placed) { /*******************************************************************/ /* Record file position in case of UPDATE or DELETE. */ /*******************************************************************/ + next: if (RecordPos(g)) return RC_FX; CurBlk = (int)Rows++; - if (trace > 1) - htrc("ReadBuffer: CurBlk=%d\n", CurBlk); + if (trace > 1) + htrc("ReadBuffer: CurBlk=%d\n", CurBlk); + + /********************************************************************/ + /* Check whether optimization on ROWID */ + /* can be done, as well as for join as for local filtering. 
*/ + /*******************************************************************/ + switch (Tdbp->TestBlock(g)) { + case RC_EF: + return RC_EF; + case RC_NF: + // Skip this record + if ((rc = SkipRecord(g, FALSE)) != RC_OK) + return rc; + + goto next; + } // endswitch rc } else Placed = false; @@ -594,19 +704,21 @@ int DOSFAM::ReadBuffer(PGLOBAL g) /***********************************************************************/ int DOSFAM::WriteBuffer(PGLOBAL g) { - char *crlf = "\n"; - int curpos = 0; + int curpos = 0; bool moved = true; // T_Stream is the temporary stream or the table file stream itself - if (!T_Stream) + if (!T_Stream) { if (UseTemp && Tdbp->Mode == MODE_UPDATE) { if (OpenTempFile(g)) return RC_FX; + Indxd = Tdbp->To_Kindex != NULL; } else T_Stream = Stream; + } // endif T_Stream + if (Tdbp->Mode == MODE_UPDATE) { /*******************************************************************/ /* Here we simply rewrite a record on itself. There are two cases */ @@ -622,14 +734,21 @@ int DOSFAM::WriteBuffer(PGLOBAL g) if (UseTemp) { /*****************************************************************/ - /* We are using a temporary file. Before writing the updated */ - /* record, we must eventually copy all the intermediate records */ - /* that have not been updated. */ + /* We are using a temporary file. */ /*****************************************************************/ - if (MoveIntermediateLines(g, &moved)) - return RC_FX; + if (Indxd) { + // Copying will be done later, must be done in sequential order + (void)AddListValue(g, TYPE_INT, &Fpos, &To_Pos); + (void)AddListValue(g, TYPE_INT, &curpos, &To_Sos); + } else { + // Before writing the updated record, we must eventually copy + // all the intermediate records that have not been updated. 
+ if (MoveIntermediateLines(g, &moved)) + return RC_FX; + + Spos = curpos; // New start position + } // endif Indxd - Spos = curpos; // New start position } else // Update is directly written back into the file, // with this (fast) method, record size cannot change. @@ -641,30 +760,30 @@ int DOSFAM::WriteBuffer(PGLOBAL g) } // endif mode /*********************************************************************/ - /* Prepare the write buffer. */ - /*********************************************************************/ -#if defined(WIN32) - if (Bin) - crlf = "\r\n"; -#endif // WIN32 - strcat(strcpy(To_Buf, Tdbp->To_Line), crlf); - - /*********************************************************************/ - /* Now start the writing process. */ + /* Prepare the write the updated line. */ /*********************************************************************/ - if ((fputs(To_Buf, T_Stream)) == EOF) { - sprintf(g->Message, MSG(FPUTS_ERROR), strerror(errno)); - return RC_FX; - } // endif EOF + if (!Indxd) { + strcat(strcpy(To_Buf, Tdbp->To_Line), (Bin) ? CrLf : "\n"); - if (Tdbp->Mode == MODE_UPDATE && moved) - if (fseek(Stream, curpos, SEEK_SET)) { - sprintf(g->Message, MSG(FSEEK_ERROR), strerror(errno)); + /*******************************************************************/ + /* Now start the writing process. 
*/ + /*******************************************************************/ + if ((fputs(To_Buf, T_Stream)) == EOF) { + sprintf(g->Message, MSG(FPUTS_ERROR), strerror(errno)); return RC_FX; - } // endif + } // endif EOF - if (trace) - htrc("write done\n"); + if (Tdbp->Mode == MODE_UPDATE && moved) + if (fseek(Stream, curpos, SEEK_SET)) { + sprintf(g->Message, MSG(FSEEK_ERROR), strerror(errno)); + return RC_FX; + } // endif + + if (trace) + htrc("write done\n"); + + } else // Add this updated line to the updated line list + (void)AddListValue(g, TYPE_STRING, Tdbp->To_Line, &To_Upd); return RC_OK; } // end of WriteBuffer @@ -675,7 +794,7 @@ int DOSFAM::WriteBuffer(PGLOBAL g) int DOSFAM::DeleteRecords(PGLOBAL g, int irc) { bool moved; - int curpos = ftell(Stream); + int curpos = ftell(Stream); /*********************************************************************/ /* There is an alternative here: */ @@ -684,8 +803,7 @@ int DOSFAM::DeleteRecords(PGLOBAL g, int irc) /* the temporary file renamed to the original file name. */ /* 2 - directly move the not deleted lines inside the original */ /* file, and at the end erase all trailing records. */ - /* This will be experimented, but method 1 must be used for Unix as */ - /* the function needed to erase trailing records is not available. */ + /* This will be experimented. */ /*********************************************************************/ if (trace) htrc( @@ -722,12 +840,18 @@ int DOSFAM::DeleteRecords(PGLOBAL g, int irc) Spos = Tpos = Fpos; } // endif UseTemp + Indxd = Tdbp->To_Kindex != NULL; } // endif Tpos == Spos /*********************************************************************/ /* Move any intermediate lines. 
*/ /*********************************************************************/ - if (MoveIntermediateLines(g, &moved)) + if (Indxd) { + // Moving will be done later, must be done in sequential order + (void)AddListValue(g, TYPE_INT, &Fpos, &To_Pos); + (void)AddListValue(g, TYPE_INT, &curpos, &To_Sos); + moved = false; + } else if (MoveIntermediateLines(g, &moved)) return RC_FX; if (irc == RC_OK) { @@ -750,7 +874,10 @@ int DOSFAM::DeleteRecords(PGLOBAL g, int irc) /* Last call after EOF has been reached. */ /* The UseTemp case is treated in CloseTableFile. */ /*******************************************************************/ - if (!UseTemp) { + if (Indxd) + Abort = MakeDeletedFile(g); + + if (!UseTemp & !Abort) { /*****************************************************************/ /* Because the chsize functionality is only accessible with a */ /* system call we must close the file and reopen it with the */ @@ -874,42 +1001,190 @@ bool DOSFAM::MoveIntermediateLines(PGLOBAL g, bool *b) } // end of MoveIntermediate Lines /***********************************************************************/ +/* MakeUpdatedFile. When updating using indexing, the issue is that */ +/* record are not necessarily updated in sequential order. */ +/* Moving intermediate lines cannot be done while making them because */ +/* this can cause extra wrong records to be included in the new file. */ +/* What we do here is to reorder the updated record and make the new */ +/* updated file from the ordered updated records. */ +/***********************************************************************/ +bool DOSFAM::MakeUpdatedFile(PGLOBAL g) + { + const char *crlf = "\n", *mode = UseTemp ? "rb" : "r+b"; + int *ix, i; + bool moved, b = false; + + /*********************************************************************/ + /* Open the temporary file, Spos is at the beginning of file. 
*/ + /*********************************************************************/ + if (!(Stream = PlugReopenFile(g, To_Fb, mode))) { + goto err; + } else if (!(Posar = MakeValueArray(g, To_Pos))) { + strcpy(g->Message, "Position array is null"); + goto err; + } else if (!(Sosar = MakeValueArray(g, To_Sos))) { + strcpy(g->Message, "Start position array is null"); + goto err; + } else if (!(Updar = MakeValueArray(g, To_Upd))) { + strcpy(g->Message, "Updated line array is null"); + goto err; + } else if (!(ix = (int*)Posar->GetSortIndex(g))) { + strcpy(g->Message, "Error getting array sort index"); + goto err; + } // endif's + + Spos = 0; + + for (i = 0; i < Posar->GetNval(); i++) { + Fpos = Posar->GetIntValue(ix[i]); + + if (i || UseTemp) { + // Copy all not updated lines preceding this one + if (MoveIntermediateLines(g, &moved)) + goto err; + + } else + Tpos = Fpos; + + // Now write the updated line. + strcat(strcpy(To_Buf, Updar->GetStringValue(ix[i])), CrLf); + + if ((fputs(To_Buf, T_Stream)) == EOF) { + sprintf(g->Message, MSG(FPUTS_ERROR), strerror(errno)); + goto err; + } // endif EOF + + // New start position + Spos = Sosar->GetIntValue(ix[i]); + } // endfor i + + // Copy eventually remaining lines + fseek(Stream, 0, SEEK_END); + Fpos = ftell(Stream); + b = MoveIntermediateLines(g, &moved) != RC_OK; + + if (!PlugCloseFile(g, To_Fbt) && !PlugCloseFile(g, To_Fb) && !b) + return false; + +err: + if (trace) + htrc("%s\n", g->Message); + + PlugCloseFile(g, To_Fbt); + return true; + } // end of MakeUpdatedFile + +/***********************************************************************/ +/* MakeDeletedFile. When deleting using indexing, the issue is that */ +/* record are not necessarily deleted in sequential order. Moving */ +/* intermediate lines cannot be done while deleing them because */ +/* this can cause extra wrong records to be included in the new file. 
*/ +/* What we do here is to reorder the deleted record and make the new */ +/* deleted file from the ordered deleted records. */ +/***********************************************************************/ +bool DOSFAM::MakeDeletedFile(PGLOBAL g) + { + const char *crlf = "\n", *mode = UseTemp ? "rb" : "r+b"; + int *ix, i; + bool moved; + + /*********************************************************************/ + /* Open the temporary file, Spos is at the beginning of file. */ + /*********************************************************************/ + if (!(Posar = MakeValueArray(g, To_Pos))) { + strcpy(g->Message, "Position array is null"); + goto err; + } else if (!(Sosar = MakeValueArray(g, To_Sos))) { + strcpy(g->Message, "Start position array is null"); + goto err; + } else if (!(ix = (int*)Posar->GetSortIndex(g))) { + strcpy(g->Message, "Error getting array sort index"); + goto err; + } // endif's + + Spos = 0; + + for (i = 0; i < Posar->GetNval(); i++) { + Fpos = Posar->GetIntValue(ix[i]); + + if (i || UseTemp) { + // Copy all not updated lines preceding this one + if (MoveIntermediateLines(g, &moved)) + goto err; + + } else + Tpos = Fpos; + + // New start position + Spos = Sosar->GetIntValue(ix[i]); + } // endfor i + + if (!PlugCloseFile(g, To_Fbt) && !PlugCloseFile(g, To_Fb)) + return false; + +err: + if (trace) + htrc("%s\n", g->Message); + + PlugCloseFile(g, To_Fbt); + return true; + } // end of MakeDeletedFile + +/***********************************************************************/ /* Delete the old file and rename the new temp file. */ +/* If aborting just delete the new temp file. */ +/* If indexed, make the temp file from the arrays. */ /***********************************************************************/ int DOSFAM::RenameTempFile(PGLOBAL g) { char *tempname, filetemp[_MAX_PATH], filename[_MAX_PATH]; - int rc; + int rc = RC_OK; - if (!To_Fbt) + if (To_Fbt) + tempname = (char*)To_Fbt->Fname; + else return RC_INFO; // Nothing to do ??? 
// This loop is necessary because, in case of join, // To_File can have been open several times. for (PFBLOCK fb = PlgGetUser(g)->Openlist; fb; fb = fb->Next) - if (fb == To_Fb || fb == To_Fbt) + if (fb == To_Fb || (fb == To_Fbt && !Indxd)) rc = PlugCloseFile(g, fb); + + if (!Abort) { + // If indexed the temp file must be made + if (Indxd) { + Abort = (Tdbp->Mode == MODE_UPDATE) ? MakeUpdatedFile(g) + : MakeDeletedFile(g); + + if (Abort) { + remove(tempname); + return RC_FX; + } // endif Abort + + } // endif Indxd + + PlugSetPath(filename, To_File, Tdbp->GetPath()); + strcat(PlugRemoveType(filetemp, filename), ".ttt"); + remove(filetemp); // May still be there from previous error + + if (rename(filename, filetemp)) { // Save file for security + sprintf(g->Message, MSG(RENAME_ERROR), + filename, filetemp, strerror(errno)); + longjmp(g->jumper[g->jump_level], 51); + } else if (rename(tempname, filename)) { + sprintf(g->Message, MSG(RENAME_ERROR), + tempname, filename, strerror(errno)); + rc = rename(filetemp, filename); // Restore saved file + longjmp(g->jumper[g->jump_level], 52); + } else if (remove(filetemp)) { + sprintf(g->Message, MSG(REMOVE_ERROR), + filetemp, strerror(errno)); + rc = RC_INFO; // Acceptable + } // endif's - tempname = (char*)To_Fbt->Fname; - PlugSetPath(filename, To_File, Tdbp->GetPath()); - strcat(PlugRemoveType(filetemp, filename), ".ttt"); - remove(filetemp); // May still be there from previous error - - if (rename(filename, filetemp)) { // Save file for security - sprintf(g->Message, MSG(RENAME_ERROR), - filename, filetemp, strerror(errno)); - rc = RC_FX; - } else if (rename(tempname, filename)) { - sprintf(g->Message, MSG(RENAME_ERROR), - tempname, filename, strerror(errno)); - rc = rename(filetemp, filename); // Restore saved file - rc = RC_FX; - } else if (remove(filetemp)) { - sprintf(g->Message, MSG(REMOVE_ERROR), - filetemp, strerror(errno)); - rc = RC_INFO; // Acceptable } else - rc = RC_OK; + remove(tempname); return rc; } // end of 
RenameTempFile @@ -917,22 +1192,24 @@ int DOSFAM::RenameTempFile(PGLOBAL g) /***********************************************************************/ /* Table file close routine for DOS access method. */ /***********************************************************************/ -void DOSFAM::CloseTableFile(PGLOBAL g) +void DOSFAM::CloseTableFile(PGLOBAL g, bool abort) { int rc; + Abort = abort; + if (UseTemp && T_Stream) { - if (Tdbp->Mode == MODE_UPDATE) { + if (Tdbp->Mode == MODE_UPDATE && !Indxd && !Abort) { // Copy eventually remaining lines bool b; fseek(Stream, 0, SEEK_END); Fpos = ftell(Stream); - rc = MoveIntermediateLines(g, &b); - } // endif Mode + Abort = MoveIntermediateLines(g, &b) != RC_OK; + } // endif Abort // Delete the old file and rename the new temp file. - RenameTempFile(g); // Also close all files + rc = RenameTempFile(g); // Also close all files } else { rc = PlugCloseFile(g, To_Fb); @@ -942,6 +1219,7 @@ void DOSFAM::CloseTableFile(PGLOBAL g) } // endif UseTemp Stream = NULL; // So we can know whether table is open + T_Stream = NULL; } // end of CloseTableFile /***********************************************************************/ @@ -968,7 +1246,7 @@ BLKFAM::BLKFAM(PDOSDEF tdp) : DOSFAM(tdp) Last = tdp->GetLast(); Nrec = tdp->GetElemt(); Closing = false; - BlkPos = NULL; + BlkPos = tdp->GetTo_Pos(); CurLine = NULL; NxtLine = NULL; OutBuf = NULL; @@ -998,12 +1276,30 @@ void BLKFAM::Reset(void) /***********************************************************************/ int BLKFAM::Cardinality(PGLOBAL g) { - // Should not be called in this version - return (g) ? -1 : 0; -//return (g) ? (int)((Block - 1) * Nrec + Last) : 1; + return (g) ? ((Block > 0) ? (int)((Block - 1) * Nrec + Last) : 0) : 1; } // end of Cardinality /***********************************************************************/ +/* Use BlockTest to reduce the table estimated size. 
*/ +/***********************************************************************/ +int BLKFAM::MaxBlkSize(PGLOBAL g, int s) + { + int rc = RC_OK, savcur = CurBlk; + int size; + + // Roughly estimate the table size as the sum of blocks + // that can contain good rows + for (size = 0, CurBlk = 0; CurBlk < Block; CurBlk++) + if ((rc = Tdbp->TestBlock(g)) == RC_OK) + size += (CurBlk == Block - 1) ? Last : Nrec; + else if (rc == RC_EF) + break; + + CurBlk = savcur; + return size; + } // end of MaxBlkSize + +/***********************************************************************/ /* Allocate the line buffer. For mode Delete or when a temp file is */ /* used another big buffer has to be allocated because is it used */ /* to move or update the lines into the (temp) file. */ @@ -1059,20 +1355,8 @@ int BLKFAM::GetNextPos(void) /***********************************************************************/ bool BLKFAM::SetPos(PGLOBAL g, int pos) { - if (pos < 0) { - strcpy(g->Message, MSG(INV_REC_POS)); - return true; - } // endif recpos - - CurBlk = pos / Nrec; - CurNum = pos % Nrec; -#if defined(_DEBUG) - num_eq[(CurBlk == OldBlk) ? 1 : 0]++; -#endif - - // Indicate the table position was externally set - Placed = true; - return false; + strcpy(g->Message, "Blocked variable tables cannot be used indexed"); + return true; } // end of SetPos /***********************************************************************/ @@ -1108,8 +1392,109 @@ int BLKFAM::SkipRecord(PGLOBAL g, bool header) /***********************************************************************/ int BLKFAM::ReadBuffer(PGLOBAL g) { - strcpy(g->Message, "This AM cannot be used in this version"); - return RC_FX; + int i, n, rc = RC_OK; + + /*********************************************************************/ + /* Sequential reading when Placed is not true. 
*/ + /*********************************************************************/ + if (Placed) { + Placed = false; + } else if (++CurNum < Rbuf) { + CurLine = NxtLine; + + // Get the position of the next line in the buffer + while (*NxtLine++ != '\n') ; + + // Set caller line buffer + n = NxtLine - CurLine - Ending; + memcpy(Tdbp->GetLine(), CurLine, n); + Tdbp->GetLine()[n] = '\0'; + goto fin; + } else if (Rbuf < Nrec && CurBlk != -1) { + return RC_EF; + } else { + /*******************************************************************/ + /* New block. */ + /*******************************************************************/ + CurNum = 0; + + next: + if (++CurBlk >= Block) + return RC_EF; + + /*******************************************************************/ + /* Before reading a new block, check whether block optimization */ + /* can be done, as well as for join as for local filtering. */ + /*******************************************************************/ + switch (Tdbp->TestBlock(g)) { + case RC_EF: + return RC_EF; + case RC_NF: + goto next; + } // endswitch rc + + } // endif's + + if (OldBlk == CurBlk) + goto ok; // Block is already there + + // fseek is required only in non sequential reading + if (CurBlk != OldBlk + 1) + if (fseek(Stream, BlkPos[CurBlk], SEEK_SET)) { + sprintf(g->Message, MSG(FSETPOS_ERROR), BlkPos[CurBlk]); + return RC_FX; + } // endif fseek + + // Calculate the length of block to read + BlkLen = BlkPos[CurBlk + 1] - BlkPos[CurBlk]; + + if (trace) + htrc("File position is now %d\n", ftell(Stream)); + + // Read the entire next block + n = fread(To_Buf, 1, (size_t)BlkLen, Stream); + + if (n == BlkLen) { +// ReadBlks++; + num_read++; + Rbuf = (CurBlk == Block - 1) ? Last : Nrec; + + ok: + rc = RC_OK; + + // Get the position of the current line + for (i = 0, CurLine = To_Buf; i < CurNum; i++) + while (*CurLine++ != '\n') ; // What about Unix ??? 
+ + // Now get the position of the next line + for (NxtLine = CurLine; *NxtLine++ != '\n';) ; + + // Set caller line buffer + n = NxtLine - CurLine - Ending; + memcpy(Tdbp->GetLine(), CurLine, n); + Tdbp->GetLine()[n] = '\0'; + } else if (feof(Stream)) { + rc = RC_EF; + } else { +#if defined(UNIX) + sprintf(g->Message, MSG(READ_ERROR), To_File, strerror(errno)); +#else + sprintf(g->Message, MSG(READ_ERROR), To_File, _strerror(NULL)); +#endif + + if (trace) + htrc("%s\n", g->Message); + + return RC_FX; + } // endelse + + OldBlk = CurBlk; // Last block actually read + IsRead = true; // Is read indeed + + fin: + // Store the current record file position for Delete and Update + Fpos = BlkPos[CurBlk] + CurLine - To_Buf; + return rc; } // end of ReadBuffer /***********************************************************************/ @@ -1150,7 +1535,7 @@ int BLKFAM::WriteBuffer(PGLOBAL g) /*******************************************************************/ /* Mode == MODE_UPDATE. */ /*******************************************************************/ - char *crlf; + const char *crlf; size_t len; int curpos = ftell(Stream); bool moved = true; @@ -1214,27 +1599,24 @@ int BLKFAM::WriteBuffer(PGLOBAL g) /***********************************************************************/ /* Table file close routine for DOS access method. */ /***********************************************************************/ -void BLKFAM::CloseTableFile(PGLOBAL g) +void BLKFAM::CloseTableFile(PGLOBAL g, bool abort) { int rc, wrc = RC_OK; + Abort = abort; + if (UseTemp && T_Stream) { - if (Tdbp->GetMode() == MODE_UPDATE) { + if (Tdbp->GetMode() == MODE_UPDATE && !Abort) { // Copy eventually remaining lines bool b; fseek(Stream, 0, SEEK_END); Fpos = ftell(Stream); - rc = MoveIntermediateLines(g, &b); - } else - rc = RC_OK; - - if (rc == RC_OK) - // Delete the old file and rename the new temp file. 
- rc = RenameTempFile(g); // Also close all files - else - rc = PlugCloseFile(g, To_Fb); + Abort = MoveIntermediateLines(g, &b) != RC_OK; + } // endif Abort + // Delete the old file and rename the new temp file. + rc = RenameTempFile(g); // Also close all files } else { // Closing is True if last Write was in error if (Tdbp->GetMode() == MODE_INSERT && CurNum && !Closing) { diff --git a/storage/connect/filamtxt.h b/storage/connect/filamtxt.h index c3ee96ada1a..83c93ecc6f2 100644 --- a/storage/connect/filamtxt.h +++ b/storage/connect/filamtxt.h @@ -1,7 +1,7 @@ /************** FilAMTxt H Declares Source Code File (.H) **************/ -/* Name: FILAMTXT.H Version 1.2 */ +/* Name: FILAMTXT.H Version 1.3 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2005-2012 */ +/* (C) Copyright to the author Olivier BERTRAND 2005-2014 */ /* */ /* This file contains the file access method classes declares. */ /***********************************************************************/ @@ -10,6 +10,7 @@ #define __FILAMTXT_H #include "block.h" +#include "array.h" typedef class TXTFAM *PTXF; typedef class DOSFAM *PDOSFAM; @@ -53,26 +54,35 @@ class DllExport TXTFAM : public BLOCK { virtual void Reset(void); virtual int GetFileLength(PGLOBAL g); virtual int Cardinality(PGLOBAL g); + virtual int MaxBlkSize(PGLOBAL g, int s); virtual bool AllocateBuffer(PGLOBAL g) {return false;} virtual void ResetBuffer(PGLOBAL g) {} virtual int GetNerr(void) {return 0;} virtual int GetRowID(void) = 0; virtual bool RecordPos(PGLOBAL g) = 0; - virtual bool SetPos(PGLOBAL g, int recpos) = 0; + virtual bool SetPos(PGLOBAL g, int recpos) = 0; virtual int SkipRecord(PGLOBAL g, bool header) = 0; virtual bool OpenTableFile(PGLOBAL g) = 0; virtual bool DeferReading(void) {IsRead = false; return true;} virtual int ReadBuffer(PGLOBAL g) = 0; virtual int WriteBuffer(PGLOBAL g) = 0; - virtual int DeleteRecords(PGLOBAL g, int irc) = 0; - virtual void CloseTableFile(PGLOBAL g) = 0; + virtual int 
DeleteRecords(PGLOBAL g, int irc) = 0; + virtual void CloseTableFile(PGLOBAL g, bool abort) = 0; virtual void Rewind(void) = 0; protected: + bool AddListValue(PGLOBAL g, int type, void *val, PPARM *top); + // Members PTDBDOS Tdbp; // To table class PSZ To_File; // Points to table file name PFBLOCK To_Fb; // Pointer to file block + PPARM To_Pos; // Pointer to position list + PPARM To_Sos; // Pointer to start position list + PPARM To_Upd; // Pointer to udated line list + PARRAY Posar; // Pointer to position array + PARRAY Sosar; // Pointer to start position array + PARRAY Updar; // Pointer to udated lines array bool Placed; // true if Recpos was externally set bool IsRead; // false for deferred reading bool Blocked; // true if using blocked I/O @@ -99,6 +109,8 @@ class DllExport TXTFAM : public BLOCK { int Ending; // Length of line end bool Padded; // true if fixed size blocks are padded bool Eof; // true if an EOF (0xA) character exists + bool Indxd; // True for indexed UPDATE/DELETE + bool Abort; // To abort on error char *CrLf; // End of line character(s) }; // end of class TXTFAM @@ -111,6 +123,7 @@ class DllExport DOSFAM : public TXTFAM { // Constructor DOSFAM(PDOSDEF tdp); DOSFAM(PDOSFAM txfp); + DOSFAM(PBLKFAM tdfp, PDOSDEF tdp); // Implementation virtual AMT GetAmType(void) {return TYPE_AM_DOS;} @@ -124,22 +137,25 @@ class DllExport DOSFAM : public TXTFAM { virtual void Reset(void); virtual int GetFileLength(PGLOBAL g); virtual int Cardinality(PGLOBAL g); + virtual int MaxBlkSize(PGLOBAL g, int s); virtual bool AllocateBuffer(PGLOBAL g); virtual int GetRowID(void); virtual bool RecordPos(PGLOBAL g); - virtual bool SetPos(PGLOBAL g, int recpos); + virtual bool SetPos(PGLOBAL g, int recpos); virtual int SkipRecord(PGLOBAL g, bool header); virtual bool OpenTableFile(PGLOBAL g); virtual int ReadBuffer(PGLOBAL g); virtual int WriteBuffer(PGLOBAL g); virtual int DeleteRecords(PGLOBAL g, int irc); - virtual void CloseTableFile(PGLOBAL g); + virtual void 
CloseTableFile(PGLOBAL g, bool abort); virtual void Rewind(void); protected: virtual bool OpenTempFile(PGLOBAL g); virtual bool MoveIntermediateLines(PGLOBAL g, bool *b); virtual int RenameTempFile(PGLOBAL g); + virtual bool MakeUpdatedFile(PGLOBAL g); + virtual bool MakeDeletedFile(PGLOBAL g); // Members FILE *Stream; // Points to Dos file structure @@ -147,8 +163,8 @@ class DllExport DOSFAM : public TXTFAM { PFBLOCK To_Fbt; // Pointer to temp file block int Fpos; // Position of last read record int Tpos; // Target Position for delete move - int Spos; // Start position for delete move - bool UseTemp; // True to use a temporary file in Delete + int Spos; // Start position for update/delete move + bool UseTemp; // True to use a temporary file in Upd/Del bool Bin; // True to force binary mode }; // end of class DOSFAM @@ -172,14 +188,15 @@ class DllExport BLKFAM : public DOSFAM { // Methods virtual void Reset(void); virtual int Cardinality(PGLOBAL g); + virtual int MaxBlkSize(PGLOBAL g, int s); virtual bool AllocateBuffer(PGLOBAL g); virtual int GetRowID(void); virtual bool RecordPos(PGLOBAL g); - virtual bool SetPos(PGLOBAL g, int recpos); + virtual bool SetPos(PGLOBAL g, int recpos); virtual int SkipRecord(PGLOBAL g, bool header); virtual int ReadBuffer(PGLOBAL g); virtual int WriteBuffer(PGLOBAL g); - virtual void CloseTableFile(PGLOBAL g); + virtual void CloseTableFile(PGLOBAL g, bool abort); virtual void Rewind(void); protected: diff --git a/storage/connect/filamvct.cpp b/storage/connect/filamvct.cpp index edadc25b50b..34f55cf831b 100755 --- a/storage/connect/filamvct.cpp +++ b/storage/connect/filamvct.cpp @@ -1,16 +1,16 @@ /*********** File AM Vct C++ Program Source Code File (.CPP) ***********/ /* PROGRAM NAME: FILAMVCT */ /* ------------- */ -/* Version 2.4 */ +/* Version 2.6 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 2005-2013 */ +/* (C) Copyright to the author Olivier BERTRAND 2005-2014 */ /* */ /* WHAT THIS 
PROGRAM DOES: */ /* ----------------------- */ /* This program are the VCT file access method classes. */ -/* Added in version 2: F */ +/* Added in version 2: */ /* - Split Vec format. */ /* - Partial delete. */ /* - Use of tempfile for update. */ @@ -29,7 +29,7 @@ #endif // __BORLAND__ //#include <windows.h> #include <sys/stat.h> -#else // !WIN32 F +#else // !WIN32 #if defined(UNIX) #include <sys/types.h> #include <sys/stat.h> @@ -93,6 +93,11 @@ typedef struct _vecheader { PVBLK AllocValBlock(PGLOBAL, void *, int, int, int, int, bool check = true, bool blank = true, bool un = false); +/***********************************************************************/ +/* Routine called externally by VCTFAM MakeUpdatedFile function. */ +/***********************************************************************/ +PARRAY MakeValueArray(PGLOBAL g, PPARM pp); + /* -------------------------- Class VCTFAM --------------------------- */ /***********************************************************************/ @@ -137,6 +142,39 @@ VCTFAM::VCTFAM(PVCTFAM txfp) : FIXFAM(txfp) } // end of VCTFAM copy constructor /***********************************************************************/ +/* VCT GetFileLength: returns file size in number of bytes. */ +/* This function is here to be accessible by VECFAM and VMPFAM. 
*/ +/***********************************************************************/ +int VCTFAM::GetFileLength(PGLOBAL g) + { + if (Split) { + // Get the total file length + char filename[_MAX_PATH]; + char *savfile = To_File; + int i, len = 0; + + // Initialize the array of file structures + if (!Colfn) { + // Prepare the column file name pattern and set Ncol + Colfn = (char*)PlugSubAlloc(g, NULL, _MAX_PATH); + Ncol = ((PVCTDEF)Tdbp->GetDef())->MakeFnPattern(Colfn); + } // endif Colfn + + To_File = filename; + + for (i = 0; i < Ncol; i++) { + sprintf(filename, Colfn, i+1); + len += TXTFAM::GetFileLength(g); + } // endfor i + + To_File = savfile; + return len; + } else + return TXTFAM::GetFileLength(g); + + } // end of GetFileLength + +/***********************************************************************/ /* Reset read/write position values. */ /***********************************************************************/ void VCTFAM::Reset(void) @@ -170,7 +208,7 @@ int VCTFAM::GetBlockInfo(PGLOBAL g) if ((h = global_open(g, MSGID_CANNOT_OPEN, filename, O_RDONLY)) == -1 || !_filelength(h)) { // Consider this is a void table - Last = Nrec; + Last = Nrec; Block = 0; if (h != -1) @@ -179,7 +217,7 @@ int VCTFAM::GetBlockInfo(PGLOBAL g) return n; } else if (Header == 3) k = lseek(h, -(int)sizeof(VECHEADER), SEEK_END); - + if ((k = read(h, &vh, sizeof(vh))) != sizeof(vh)) { sprintf(g->Message, "Error reading header file %s", filename); n = -1; @@ -187,7 +225,7 @@ int VCTFAM::GetBlockInfo(PGLOBAL g) sprintf(g->Message, "MaxRec=%d doesn't match MaxBlk=%d Nrec=%d", vh.MaxRec, MaxBlk, Nrec); n = -1; - } else { + } else { Block = (vh.NumRec > 0) ? (vh.NumRec + Nrec - 1) / Nrec : 0; Last = (vh.NumRec + Nrec - 1) % Nrec + 1; } // endif s @@ -245,6 +283,26 @@ bool VCTFAM::SetBlockInfo(PGLOBAL g) } // end of SetBlockInfo /***********************************************************************/ +/* Use BlockTest to reduce the table estimated size. 
*/ +/***********************************************************************/ +int VCTFAM::MaxBlkSize(PGLOBAL g, int s) + { + int rc = RC_OK, savcur = CurBlk; + int size; + + // Roughly estimate the table size as the sum of blocks + // that can contain good rows + for (size = 0, CurBlk = 0; CurBlk < Block; CurBlk++) + if ((rc = Tdbp->TestBlock(g)) == RC_OK) + size += (CurBlk == Block - 1) ? Last : Nrec; + else if (rc == RC_EF) + break; + + CurBlk = savcur; + return size; + } // end of MaxBlkSize + +/***********************************************************************/ /* VCT Cardinality: returns table cardinality in number of rows. */ /* This function can be called with a null argument to test the */ /* availability of Cardinality implementation (1 yes, 0 no). */ @@ -264,20 +322,20 @@ int VCTFAM::Cardinality(PGLOBAL g) PSZ savfn = To_File; int len, clen, card = -1; PCOLDEF cdp = Tdbp->GetDef()->GetCols(); - + if (!Colfn) { // Prepare the column file name pattern Colfn = (char*)PlugSubAlloc(g, NULL, _MAX_PATH); Ncol = ((VCTDEF*)Tdbp->GetDef())->MakeFnPattern(Colfn); } // endif Colfn - + // Use the first column file to calculate the cardinality clen = cdp->GetClen(); sprintf(filename, Colfn, 1); To_File = filename; - len = GetFileLength(g); + len = TXTFAM::GetFileLength(g); To_File = savfn; - + if (len >= 0) { if (!(len % clen)) card = len / clen; // Fixed length file @@ -289,7 +347,7 @@ int VCTFAM::Cardinality(PGLOBAL g) } else card = 0; - + // Set number of blocks for later use Block = (card > 0) ? (card + Nrec - 1) / Nrec : 0; Last = (card + Nrec - 1) % Nrec + 1; @@ -301,7 +359,7 @@ int VCTFAM::Cardinality(PGLOBAL g) } // endif split - return (int)((Block - 1) * Nrec + Last); + return (Block) ? ((Block - 1) * Nrec + Last) : 0; } // end of Cardinality /***********************************************************************/ @@ -310,7 +368,7 @@ int VCTFAM::Cardinality(PGLOBAL g) int VCTFAM::GetRowID(void) { return 1 + ((CurBlk < Block) ? 
CurNum + Nrec * CurBlk - : (Block - 1) * Nrec + Last); + : (Block - 1) * Nrec + Last); } // end of GetRowID /***********************************************************************/ @@ -394,7 +452,7 @@ bool VCTFAM::OpenTableFile(PGLOBAL g) return true; strcpy(opmode, "r+b"); // Required to update empty blocks - } else if (Last == Nrec) + } else if (!Block || Last == Nrec) strcpy(opmode, "ab"); else strcpy(opmode, "r+b"); // Required to update the last block @@ -429,7 +487,7 @@ bool VCTFAM::OpenTableFile(PGLOBAL g) return ResetTableSize(g, 0, Nrec); num_read = num_there = num_write = 0; - + // Allocate the table and column block buffer return AllocateBuffer(g); } // end of OpenTableFile @@ -555,9 +613,21 @@ int VCTFAM::ReadBuffer(PGLOBAL g) /*******************************************************************/ CurNum = 0; + next: if (++CurBlk == Block) return RC_EF; // End of file + /*******************************************************************/ + /* Before reading a new block, check whether block optimizing */ + /* can be done, as well as for join as for local filtering. */ + /*******************************************************************/ + switch (Tdbp->TestBlock(g)) { + case RC_EF: + return RC_EF; + case RC_NF: + goto next; + } // endswitch rc + num_there++; } // endif CurNum @@ -600,7 +670,10 @@ int VCTFAM::WriteBuffer(PGLOBAL g) // Mode Update is done in ReadDB, we just initialize it here if (!T_Stream) { if (UseTemp) { - if (OpenTempFile(g)) + if ((Indxd = Tdbp->GetKindex() != NULL)) { + strcpy(g->Message, "VCT indexed udate using temp file NIY"); + return RC_FX; + } else if (OpenTempFile(g)) return RC_FX; // Most of the time, not all table columns are updated. 
@@ -684,7 +757,7 @@ int VCTFAM::WriteBuffer(PGLOBAL g) int VCTFAM::DeleteRecords(PGLOBAL g, int irc) { bool eof = false; - + if (trace) htrc("VCT DeleteDB: rc=%d UseTemp=%d Fpos=%d Tpos=%d Spos=%d\n", irc, UseTemp, Fpos, Tpos, Spos); @@ -694,7 +767,7 @@ int VCTFAM::DeleteRecords(PGLOBAL g, int irc) /* EOF: position Fpos at the end-of-file position. */ /*******************************************************************/ Fpos = (Block - 1) * Nrec + Last; - + if (trace) htrc("Fpos placed at file end=%d\n", Fpos); @@ -719,12 +792,17 @@ int VCTFAM::DeleteRecords(PGLOBAL g, int irc) Spos = Tpos = Fpos; } // endif UseTemp + Indxd = Tdbp->GetKindex() != NULL; } // endif Tpos == Spos /*********************************************************************/ /* Move any intermediate lines. */ /*********************************************************************/ - if (MoveIntermediateLines(g, &eof)) + if (Indxd) { + // Moving will be done later, must be done in sequential order + (void)AddListValue(g, TYPE_INT, &Fpos, &To_Pos); + Spos = Fpos; + } else if (MoveIntermediateLines(g, &eof)) return RC_FX; if (irc == RC_OK) { @@ -744,6 +822,11 @@ int VCTFAM::DeleteRecords(PGLOBAL g, int irc) /* Last call after EOF has been reached. */ /* Update the Block and Last values. */ /*******************************************************************/ + if (Indxd && (Abort = MakeDeletedFile(g))) + return RC_FX; + else + Indxd = false; // Not to be redone by RenameTempFile + Block = (Tpos > 0) ? (Tpos + Nrec - 1) / Nrec : 0; Last = (Tpos + Nrec - 1) % Nrec + 1; @@ -843,7 +926,7 @@ bool VCTFAM::OpenTempFile(PGLOBAL g) bool VCTFAM::MoveIntermediateLines(PGLOBAL g, bool *b) { int i, dep, off; - int n; + int n; bool eof = (b) ? *b : false; size_t req, len; @@ -944,6 +1027,63 @@ bool VCTFAM::MoveIntermediateLines(PGLOBAL g, bool *b) } // end of MoveIntermediateLines /***********************************************************************/ +/* MakeDeletedFile. 
When deleting using indexing, the issue is that */ +/* record are not necessarily deleted in sequential order. Moving */ +/* intermediate lines cannot be done while deleing them because */ +/* this can cause extra wrong records to be included in the new file. */ +/* What we do here is to reorder the deleted record and make the new */ +/* deleted file from the ordered deleted records. */ +/***********************************************************************/ +bool VCTFAM::MakeDeletedFile(PGLOBAL g) + { +//char *crlf = "\n", *mode = UseTemp ? "rb" : "r+b"; + int *ix, i, n; + bool eof = false; + + /*********************************************************************/ + /* Open the temporary file, Spos is at the beginning of file. */ + /*********************************************************************/ + if (!(Posar = MakeValueArray(g, To_Pos))) { + strcpy(g->Message, "Position array is null"); + goto err; + } else if (!(ix = (int*)Posar->GetSortIndex(g))) { + strcpy(g->Message, "Error getting array sort index"); + goto err; + } // endif's + + n = Posar->GetNval(); + Spos = 0; + + for (i = 0; i < n; i++) { + if (i == n - 1 && !MaxBlk && UseTemp) + eof = true; + + Fpos = Posar->GetIntValue(ix[i]); + + if (i || UseTemp) { + // Copy all not updated lines preceding this one + if (MoveIntermediateLines(g, &eof)) + goto err; + + } else + Tpos = Fpos; + + // New start position + Spos = Fpos + 1; + } // endfor i + + if (!PlugCloseFile(g, To_Fbt)) + return false; + +err: + if (trace) + htrc("%s\n", g->Message); + + PlugCloseFile(g, To_Fbt); + return true; + } // end of MakeDeletedFile + +/***********************************************************************/ /* Clean deleted space in a VCT or Vec table file. 
*/ /***********************************************************************/ bool VCTFAM::CleanUnusedSpace(PGLOBAL g) @@ -1010,11 +1150,13 @@ bool VCTFAM::CleanUnusedSpace(PGLOBAL g) /***********************************************************************/ /* Data Base close routine for VCT access method. */ /***********************************************************************/ -void VCTFAM::CloseTableFile(PGLOBAL g) +void VCTFAM::CloseTableFile(PGLOBAL g, bool abort) { int rc = 0, wrc = RC_OK; MODE mode = Tdbp->GetMode(); + Abort = abort; + if (mode == MODE_INSERT) { if (Closing) wrc = RC_FX; // Last write was in error @@ -1093,10 +1235,10 @@ bool VCTFAM::ResetTableSize(PGLOBAL g, int block, int last) // Update catalog values for Block and Last PVCTDEF defp = (PVCTDEF)Tdbp->GetDef(); LPCSTR name = Tdbp->GetName(); - + defp->SetBlock(Block); defp->SetLast(Last); - + if (!defp->SetIntCatInfo("Blocks", Block) || !defp->SetIntCatInfo("Last", Last)) { sprintf(g->Message, MSG(UPDATE_ERROR), "Header"); @@ -1189,7 +1331,7 @@ bool VCTFAM::WriteBlock(PGLOBAL g, PVCTCOL colp) /* Calculate the offset and size of the block to write. */ /*********************************************************************/ if (MaxBlk) // File has Vector format - len = Headlen + len = Headlen + Nrec * (colp->Deplac * MaxBlk + colp->Clen * colp->ColBlk); else // Old VCT format len = Nrec * (colp->Deplac + Lrecl * colp->ColBlk); @@ -1212,7 +1354,7 @@ bool VCTFAM::WriteBlock(PGLOBAL g, PVCTCOL colp) (size_t)colp->Clen, n, T_Stream)) { sprintf(g->Message, MSG(WRITE_STRERROR), (UseTemp) ? 
To_Fbt->Fname : To_File, strerror(errno)); - + if (trace) htrc("Write error: %s\n", strerror(errno)); @@ -1537,9 +1679,6 @@ int VCMFAM::WriteBuffer(PGLOBAL g) /***********************************************************************/ int VCMFAM::DeleteRecords(PGLOBAL g, int irc) { - int i; - int m, n; - if (trace) htrc("VCM DeleteDB: irc=%d tobuf=%p Tpos=%p Spos=%p\n", irc, To_Buf, Tpos, Spos); @@ -1549,59 +1688,27 @@ int VCMFAM::DeleteRecords(PGLOBAL g, int irc) /* EOF: position Fpos at the top of map position. */ /*******************************************************************/ Fpos = (Block - 1) * Nrec + Last; - + if (trace) htrc("Fpos placed at file top=%p\n", Fpos); } else // Fpos is the Deleted line position Fpos = CurBlk * Nrec + CurNum; - if (Tpos == Spos) + if (Tpos == Spos) { /*******************************************************************/ /* First line to delete. Move of eventual preceding lines is */ /* not required here, just setting of future Spos and Tpos. */ /*******************************************************************/ - Tpos = Fpos; // Spos is set below - else if (Fpos > Spos) { - /*******************************************************************/ - /* Non consecutive line to delete. Move intermediate lines. */ - /*******************************************************************/ - if (!MaxBlk) { - // Old VCT format, moving must respect block limits - char *ps, *pt; - int req, soff, toff; - - for (n = Fpos - Spos; n > 0; n -= req) { - soff = Spos % Nrec; - toff = Tpos % Nrec; - req = (size_t)MY_MIN(n, Nrec - MY_MAX(soff, toff)); - - for (i = 0; i < Ncol; i++) { - ps = Memcol[i] + (Spos / Nrec) * Blksize + soff * Clens[i]; - pt = Memcol[i] + (Tpos / Nrec) * Blksize + toff * Clens[i]; - memmove(pt, ps, req * Clens[i]); - } // endfor i - - Tpos += req; - Spos += req; - } // endfor n - - } else { - // True vector format, all is simple... 
- n = Fpos - Spos; - - for (i = 0; i < Ncol; i++) { - m = Clens[i]; - memmove(Memcol[i] + Tpos * m, Memcol[i] + Spos * m, n * m); - } // endfor i - - Tpos += n; - } // endif MaxBlk - - if (trace) - htrc("move %d bytes\n", n); + Tpos = Spos = Fpos; + Indxd = Tdbp->GetKindex() != NULL; + } // endif Tpos - } // endif n + if (Indxd) + // Moving will be done later, must be done in sequential order + (void)AddListValue(g, TYPE_INT, &Fpos, &To_Pos); + else + (void)MoveIntermediateLines(g); if (irc == RC_OK) { Spos = Fpos + 1; // New start position @@ -1609,10 +1716,14 @@ int VCMFAM::DeleteRecords(PGLOBAL g, int irc) if (trace) htrc("after: Tpos=%p Spos=%p\n", Tpos, Spos); - } else { + } else if (!(Abort = (Indxd && MakeDeletedFile(g)))) { + /*******************************************************************/ + /* Last call after EOF has been reached. */ + /*******************************************************************/ + int i, m, n; + /*******************************************************************/ - /* Last call after EOF has been reached. Reset the Block and */ - /* Last values for TDBVCT::MakeBlockValues. */ + /* Reset the Block and Last values for TDBVCT::MakeBlockValues. */ /*******************************************************************/ Block = (Tpos > 0) ? (Tpos + Nrec - 1) / Nrec : 0; Last = (Tpos + Nrec - 1) % Nrec + 1; @@ -1675,15 +1786,108 @@ int VCMFAM::DeleteRecords(PGLOBAL g, int irc) // Reset Last and Block values in the catalog PlugCloseFile(g, To_Fb); // in case of Header ResetTableSize(g, Block, Last); - } // endif irc + } else + return RC_FX; return RC_OK; // All is correct } // end of DeleteRecords /***********************************************************************/ +/* Move intermediate deleted or updated lines. 
*/ +/***********************************************************************/ +bool VCMFAM::MoveIntermediateLines(PGLOBAL g, bool *b) + { + int i, m, n; + + if ((n = Fpos - Spos) > 0) { + /*******************************************************************/ + /* Non consecutive line to delete. Move intermediate lines. */ + /*******************************************************************/ + if (!MaxBlk) { + // Old VCT format, moving must respect block limits + char *ps, *pt; + int req, soff, toff; + + for (; n > 0; n -= req) { + soff = Spos % Nrec; + toff = Tpos % Nrec; + req = (size_t)MY_MIN(n, Nrec - MY_MAX(soff, toff)); + + for (i = 0; i < Ncol; i++) { + ps = Memcol[i] + (Spos / Nrec) * Blksize + soff * Clens[i]; + pt = Memcol[i] + (Tpos / Nrec) * Blksize + toff * Clens[i]; + memmove(pt, ps, req * Clens[i]); + } // endfor i + + Tpos += req; + Spos += req; + } // endfor n + + } else { + // True vector format, all is simple... + for (i = 0; i < Ncol; i++) { + m = Clens[i]; + memmove(Memcol[i] + Tpos * m, Memcol[i] + Spos * m, n * m); + } // endfor i + + Tpos += n; + } // endif MaxBlk + + if (trace) + htrc("move %d bytes\n", n); + + } // endif n + + return false; + } // end of MoveIntermediate Lines + +/***********************************************************************/ +/* MakeDeletedFile. When deleting using indexing, the issue is that */ +/* record are not necessarily deleted in sequential order. Moving */ +/* intermediate lines cannot be done while deleting them. */ +/* What we do here is to reorder the deleted records and move the */ +/* intermediate files from the ordered deleted record positions. */ +/***********************************************************************/ +bool VCMFAM::MakeDeletedFile(PGLOBAL g) + { + int *ix, i; + + /*********************************************************************/ + /* Make and order the arrays from the saved values. 
*/ + /*********************************************************************/ + if (!(Posar = MakeValueArray(g, To_Pos))) { + strcpy(g->Message, "Position array is null"); + goto err; + } else if (!(ix = (int*)Posar->GetSortIndex(g))) { + strcpy(g->Message, "Error getting array sort index"); + goto err; + } // endif's + + for (i = 0; i < Posar->GetNval(); i++) { + Fpos = Posar->GetIntValue(ix[i]); + + if (!i) { + Tpos = Fpos; + } else + (void)MoveIntermediateLines(g); + + // New start position + Spos = Fpos + 1; + } // endfor i + + return false; + +err: + if (trace) + htrc("%s\n", g->Message); + + return true; + } // end of MakeDeletedFile + +/***********************************************************************/ /* Data Base close routine for VMP access method. */ /***********************************************************************/ -void VCMFAM::CloseTableFile(PGLOBAL g) +void VCMFAM::CloseTableFile(PGLOBAL g, bool abort) { int wrc = RC_OK; MODE mode = Tdbp->GetMode(); @@ -1710,7 +1914,7 @@ void VCMFAM::CloseTableFile(PGLOBAL g) if (wrc != RC_FX) /*rc =*/ ResetTableSize(g, Block, Last); - } else if (mode != MODE_DELETE) + } else if (mode != MODE_DELETE || Abort) PlugCloseFile(g, To_Fb); } // end of CloseTableFile @@ -1838,7 +2042,7 @@ bool VECFAM::OpenTableFile(PGLOBAL g) // Selective delete, pass thru case MODE_UPDATE: UseTemp = Tdbp->IsUsingTemp(g); - strcpy(opmode, (UseTemp) ? "r": "r+"); + strcpy(opmode, (UseTemp) ? 
"rb": "r+b"); break; case MODE_INSERT: strcpy(opmode, "ab"); @@ -1897,10 +2101,13 @@ bool VECFAM::OpenTableFile(PGLOBAL g) // Check for void table or missing columns for (i = 0, cp = (PVCTCOL)Tdbp->GetColumns(); cp; cp = (PVCTCOL)cp->Next) - if (!i++) - b = !Streams[cp->Index - 1]; - else if (b != !Streams[cp->Index - 1]) - return true; + if (!cp->IsSpecial()) { + if (!i++) + b = !Streams[cp->Index - 1]; + else if (b != !Streams[cp->Index - 1]) + return true; + + } // endif Special } // endif mode @@ -2102,7 +2309,10 @@ int VECFAM::WriteBuffer(PGLOBAL g) } else // Mode Update // Writing updates being done in ReadDB we do initialization only. if (InitUpdate) { - if (OpenTempFile(g)) + if ((Indxd = Tdbp->GetKindex() != NULL)) { + strcpy(g->Message, "VEC indexed udate using temp file NIY"); + return RC_FX; + } else if (OpenTempFile(g)) return RC_FX; InitUpdate = false; // Done @@ -2114,18 +2324,10 @@ int VECFAM::WriteBuffer(PGLOBAL g) /***********************************************************************/ /* Data Base delete line routine for split vertical access methods. */ /* Note: lines are moved directly in the files (ooops...) */ +/* Using temp file depends on the Check setting, false by default. */ /***********************************************************************/ int VECFAM::DeleteRecords(PGLOBAL g, int irc) { - /*********************************************************************/ - /* There is an alternative here: */ - /* 1 - use a temporary file in which are copied all not deleted */ - /* lines, at the end the original file will be deleted and */ - /* the temporary file renamed to the original file name. */ - /* 2 - directly move the not deleted lines inside the original */ - /* file, and at the end erase all trailing records. */ - /* This depends on the Check setting, false by default. 
*/ - /*********************************************************************/ if (trace) htrc("VEC DeleteDB: rc=%d UseTemp=%d Fpos=%d Tpos=%d Spos=%d\n", irc, UseTemp, Fpos, Tpos, Spos); @@ -2135,14 +2337,14 @@ int VECFAM::DeleteRecords(PGLOBAL g, int irc) /* EOF: position Fpos at the end-of-file position. */ /*******************************************************************/ Fpos = Cardinality(g); - + if (trace) htrc("Fpos placed at file end=%d\n", Fpos); } else // Fpos is the Deleted line position Fpos = CurBlk * Nrec + CurNum; - if (Tpos == Spos) + if (Tpos == Spos) { // First line to delete if (UseTemp) { /*****************************************************************/ @@ -2158,10 +2360,17 @@ int VECFAM::DeleteRecords(PGLOBAL g, int irc) /*****************************************************************/ Spos = Tpos = Fpos; + Indxd = Tdbp->GetKindex() != NULL; + } // endif Tpos == Spos + /*********************************************************************/ /* Move any intermediate lines. */ /*********************************************************************/ - if (MoveIntermediateLines(g)) + if (Indxd) { + // Moving will be done later, must be done in sequential order + (void)AddListValue(g, TYPE_INT, &Fpos, &To_Pos); + Spos = Fpos; + } else if (MoveIntermediateLines(g)) return RC_FX; if (irc == RC_OK) { @@ -2177,10 +2386,15 @@ int VECFAM::DeleteRecords(PGLOBAL g, int irc) /*******************************************************************/ /* Last call after EOF has been reached. 
*/ /*******************************************************************/ + if (Indxd && (Abort = MakeDeletedFile(g))) + return RC_FX; +// else +// Indxd = false; // Not to be redone by RenameTempFile + if (!UseTemp) { /*****************************************************************/ /* Because the chsize functionality is only accessible with a */ - /* system call we must close the file and reopen it with the */ + /* system call we must close the files and reopen them with the */ /* open function (_fopen for MS??) this is still to be checked */ /* for compatibility with other OS's. */ /*****************************************************************/ @@ -2288,8 +2502,7 @@ bool VECFAM::MoveLines(PGLOBAL g) /***********************************************************************/ bool VECFAM::MoveIntermediateLines(PGLOBAL g, bool *bn) { - int i; - int n; + int i, n; bool b = false; size_t req, len; @@ -2348,6 +2561,56 @@ bool VECFAM::MoveIntermediateLines(PGLOBAL g, bool *bn) } // end of MoveIntermediate Lines /***********************************************************************/ +/* MakeDeletedFile. When deleting using indexing, the issue is that */ +/* record are not necessarily deleted in sequential order. Moving */ +/* intermediate lines cannot be done while deleing them because */ +/* this can cause extra wrong records to be included in the new file. */ +/* What we do here is to reorder the deleted record and make the new */ +/* deleted file from the ordered deleted records. */ +/***********************************************************************/ +bool VECFAM::MakeDeletedFile(PGLOBAL g) + { + int *ix, i, n; + + /*********************************************************************/ + /* Open the temporary file, Spos is at the beginning of file. 
*/ + /*********************************************************************/ + if (!(Posar = MakeValueArray(g, To_Pos))) { + strcpy(g->Message, "Position array is null"); + goto err; + } else if (!(ix = (int*)Posar->GetSortIndex(g))) { + strcpy(g->Message, "Error getting array sort index"); + goto err; + } // endif's + + n = Posar->GetNval(); + Spos = 0; + + for (i = 0; i < n; i++) { + Fpos = Posar->GetIntValue(ix[i]); + + if (i || UseTemp) { + // Copy all not updated lines preceding this one + if (MoveIntermediateLines(g)) + goto err; + + } else + Tpos = Fpos; + + // New start position + Spos = Fpos + 1; + } // endfor i + + return false; + +err: + if (trace) + htrc("%s\n", g->Message); + + return true; + } // end of MakeDeletedFile + +/***********************************************************************/ /* Delete the old files and rename the new temporary files. */ /***********************************************************************/ int VECFAM::RenameTempFile(PGLOBAL g) @@ -2366,25 +2629,30 @@ int VECFAM::RenameTempFile(PGLOBAL g) continue; tempname = (char*)T_Fbs[i]->Fname; - sprintf(filename, Colfn, i+1); - PlugSetPath(filename, filename, Tdbp->GetPath()); - strcat(PlugRemoveType(filetemp, filename), ".ttt"); - remove(filetemp); // May still be there from previous error - - if (rename(filename, filetemp)) { // Save file for security - sprintf(g->Message, MSG(RENAME_ERROR), - filename, filetemp, strerror(errno)); - rc = RC_FX; - } else if (rename(tempname, filename)) { - sprintf(g->Message, MSG(RENAME_ERROR), - tempname, filename, strerror(errno)); - rc = rename(filetemp, filename); // Restore saved file - rc = RC_FX; - } else if (remove(filetemp)) { - sprintf(g->Message, MSG(REMOVE_ERROR), - filetemp, strerror(errno)); - rc = RC_INFO; // Acceptable - } // endif's + + if (!Abort) { + sprintf(filename, Colfn, i+1); + PlugSetPath(filename, filename, Tdbp->GetPath()); + strcat(PlugRemoveType(filetemp, filename), ".ttt"); + remove(filetemp); // May still be 
there from previous error + + if (rename(filename, filetemp)) { // Save file for security + sprintf(g->Message, MSG(RENAME_ERROR), + filename, filetemp, strerror(errno)); + rc = RC_FX; + } else if (rename(tempname, filename)) { + sprintf(g->Message, MSG(RENAME_ERROR), + tempname, filename, strerror(errno)); + rc = rename(filetemp, filename); // Restore saved file + rc = RC_FX; + } else if (remove(filetemp)) { + sprintf(g->Message, MSG(REMOVE_ERROR), + filetemp, strerror(errno)); + rc = RC_INFO; // Acceptable + } // endif's + + } else + remove(tempname); } // endfor i @@ -2394,11 +2662,13 @@ int VECFAM::RenameTempFile(PGLOBAL g) /***********************************************************************/ /* Data Base close routine for VEC access method. */ /***********************************************************************/ -void VECFAM::CloseTableFile(PGLOBAL g) +void VECFAM::CloseTableFile(PGLOBAL g, bool abort) { int rc = 0, wrc = RC_OK; MODE mode = Tdbp->GetMode(); + Abort = abort; + if (mode == MODE_INSERT) { if (Closing) wrc = RC_FX; // Last write was in error @@ -2421,10 +2691,10 @@ void VECFAM::CloseTableFile(PGLOBAL g) longjmp(g->jumper[g->jump_level], 44); } else if (mode == MODE_UPDATE) { - if (UseTemp && !InitUpdate) { + if (UseTemp && !InitUpdate && !Abort) { // Write any intermediate lines to temp file Fpos = OldBlk * Nrec; - wrc = MoveIntermediateLines(g); + Abort = MoveIntermediateLines(g) != RC_OK; // Spos = Fpos + Nrec; } // endif UseTemp @@ -2434,20 +2704,17 @@ void VECFAM::CloseTableFile(PGLOBAL g) colp; colp = (PVCTCOL)colp->Next) colp->WriteBlock(g); - if (wrc == RC_OK && UseTemp && !InitUpdate) { + if (wrc == RC_OK && UseTemp && !InitUpdate && !Abort) { // Write any intermediate lines to temp file Fpos = (Block - 1) * Nrec + Last; - wrc = MoveIntermediateLines(g); + Abort = MoveIntermediateLines(g) != RC_OK; } // endif UseTemp } // endif's mode if (UseTemp && !InitUpdate) { // If they are errors, leave files unchanged - if (wrc == RC_OK) - 
rc = RenameTempFile(g); - else - longjmp(g->jumper[g->jump_level], 44); + rc = RenameTempFile(g); } else if (Streams) for (int i = 0; i < Ncol; i++) @@ -2553,7 +2820,7 @@ bool VECFAM::WriteBlock(PGLOBAL g, PVCTCOL colp) sprintf(fn, (UseTemp) ? Tempat : Colfn, colp->Index); sprintf(g->Message, MSG(WRITE_STRERROR), fn, strerror(errno)); - + if (trace) htrc("Write error: %s\n", strerror(errno)); @@ -2593,7 +2860,7 @@ VMPFAM::VMPFAM(PVMPFAM txfp) : VCMFAM(txfp) bool VMPFAM::OpenTableFile(PGLOBAL g) { int i; - bool b; + bool b = false; MODE mode = Tdbp->GetMode(); PCOLDEF cdp; PVCTCOL cp; @@ -2639,7 +2906,7 @@ bool VMPFAM::OpenTableFile(PGLOBAL g) } else { /*******************************************************************/ - /* Open the files corresponding updated columns of the query. */ + /* Open the files corresponding to updated columns of the query. */ /*******************************************************************/ for (cp = (PVCTCOL)((PTDBVCT)Tdbp)->To_SetCols; cp; cp = (PVCTCOL)cp->Next) @@ -2654,14 +2921,18 @@ bool VMPFAM::OpenTableFile(PGLOBAL g) if (MapColumnFile(g, MODE_READ, cp->Index - 1)) return true; - } // endif mode + // Check for void table or missing columns + for (i = 0, cp = (PVCTCOL)Tdbp->GetColumns(); cp; + cp = (PVCTCOL)cp->Next) + if (!cp->IsSpecial()) { + if (!i++) + b = !Memcol[cp->Index - 1]; + else if (b != !Memcol[cp->Index - 1]) + return true; - /*********************************************************************/ - /* Check for void table or missing columns */ - /*********************************************************************/ - for (b = !Memcol[0], i = 1; i < Ncol; i++) - if (b != !Memcol[i]) - return true; + } // endif Special + + } // endif mode /*********************************************************************/ /* Allocate the table and column block buffer. */ @@ -2825,19 +3096,25 @@ int VMPFAM::DeleteRecords(PGLOBAL g, int irc) /* EOF: position Fpos at the top of map position. 
*/ /*******************************************************************/ Fpos = (Block - 1) * Nrec + Last; - + if (trace) htrc("Fpos placed at file top=%p\n", Fpos); } else // Fpos is the Deleted line position Fpos = CurBlk * Nrec + CurNum; - if (Tpos == Spos) + if (Tpos == Spos) { /*******************************************************************/ /* First line to delete. Move of eventual preceding lines is */ /* not required here, just setting of future Spos and Tpos. */ /*******************************************************************/ Tpos = Fpos; // Spos is set below + Indxd = Tdbp->GetKindex() != NULL; + } // endif Tpos + + if (Indxd) + // Moving will be done later, must be done in sequential order + (void)AddListValue(g, TYPE_INT, &Fpos, &To_Pos); else if ((n = Fpos - Spos) > 0) { /*******************************************************************/ /* Non consecutive line to delete. Move intermediate lines. */ @@ -2860,7 +3137,7 @@ int VMPFAM::DeleteRecords(PGLOBAL g, int irc) if (trace) htrc("after: Tpos=%p Spos=%p\n", Tpos, Spos); - } else { + } else if (!(Abort = (Indxd && MakeDeletedFile(g)))) { /*******************************************************************/ /* Last call after EOF has been reached. */ /* We must firstly Unmap the view and use the saved file handle */ @@ -2868,6 +3145,12 @@ int VMPFAM::DeleteRecords(PGLOBAL g, int irc) /*******************************************************************/ PFBLOCK fp; + /*******************************************************************/ + /* Reset the Block and Last values for TDBVCT::MakeBlockValues. */ + /*******************************************************************/ +// Block = (Tpos > 0) ? 
(Tpos + Nrec - 1) / Nrec : 0; +// Last = (Tpos + Nrec - 1) % Nrec + 1; + for (i = 0; i < Ncol; i++) { fp = To_Fbs[i]; CloseMemMap(fp->Memory, (size_t)fp->Length); @@ -2916,9 +3199,58 @@ int VMPFAM::DeleteRecords(PGLOBAL g, int irc) } // end of DeleteRecords /***********************************************************************/ +/* MakeDeletedFile. When deleting using indexing, the issue is that */ +/* record are not necessarily deleted in sequential order. Moving */ +/* intermediate lines cannot be done while deleting them. */ +/* What we do here is to reorder the deleted records and move the */ +/* intermediate files from the ordered deleted record positions. */ +/***********************************************************************/ +bool VMPFAM::MakeDeletedFile(PGLOBAL g) + { + int *ix, i, j, m, n; + + /*********************************************************************/ + /* Make and order the arrays from the saved values. */ + /*********************************************************************/ + if (!(Posar = MakeValueArray(g, To_Pos))) { + strcpy(g->Message, "Position array is null"); + goto err; + } else if (!(ix = (int*)Posar->GetSortIndex(g))) { + strcpy(g->Message, "Error getting array sort index"); + goto err; + } // endif's + + for (i = 0; i < Posar->GetNval(); i++) { + Fpos = Posar->GetIntValue(ix[i]); + + if (!i) { + Tpos = Fpos; + } else if ((n = Fpos - Spos) > 0) { + for (j = 0; j < Ncol; j++) { + m = Clens[j]; + memmove(Memcol[j] + Tpos * m, Memcol[j] + Spos * m, m * n); + } // endif j + + Tpos += n; + } // endif n + + // New start position + Spos = Fpos + 1; + } // endfor i + + return false; + +err: + if (trace) + htrc("%s\n", g->Message); + + return true; + } // end of MakeDeletedFile + +/***********************************************************************/ /* Data Base close routine for VMP access method. 
*/ /***********************************************************************/ -void VMPFAM::CloseTableFile(PGLOBAL g) +void VMPFAM::CloseTableFile(PGLOBAL g, bool abort) { if (Tdbp->GetMode() == MODE_DELETE) { // Set Block and Nrec values for TDBVCT::MakeBlockValues @@ -3011,7 +3343,7 @@ bool BGVFAM::BigRead(PGLOBAL g, HANDLE h, void *inbuf, int req) } // endelse brc sprintf(g->Message, MSG(READ_ERROR), To_File, buf); - + if (trace) htrc("BIGREAD: %s\n", g->Message); @@ -3025,7 +3357,7 @@ bool BGVFAM::BigRead(PGLOBAL g, HANDLE h, void *inbuf, int req) const char *fn = (h == Hfile) ? To_File : "Tempfile"; sprintf(g->Message, MSG(READ_ERROR), fn, strerror(errno)); - + if (trace) htrc("BIGREAD: nbr=%d len=%d errno=%d %s\n", nbr, len, errno, g->Message); @@ -3079,7 +3411,7 @@ bool BGVFAM::BigWrite(PGLOBAL g, HANDLE h, void *inbuf, int req) const char *fn = (h == Hfile) ? To_File : "Tempfile"; sprintf(g->Message, MSG(WRITE_STRERROR), fn, strerror(errno)); - + if (trace) htrc("BIGWRITE: nbw=%d len=%d errno=%d %s\n", nbw, len, errno, g->Message); @@ -3132,17 +3464,17 @@ int BGVFAM::GetBlockInfo(PGLOBAL g) // Consider this is a void table if (trace) htrc("Void table h=%d\n", h); - - Last = Nrec; + + Last = Nrec; Block = 0; if (h != INVALID_HANDLE_VALUE) CloseFileHandle(h); return n; - } else if (Header == 3) + } else if (Header == 3) /*b = */ BigSeek(g, h, -(BIGINT)sizeof(vh), true); - + if (BigRead(g, h, &vh, sizeof(vh))) { sprintf(g->Message, "Error reading header file %s", filename); n = -1; @@ -3153,10 +3485,10 @@ int BGVFAM::GetBlockInfo(PGLOBAL g) } else { Block = (vh.NumRec > 0) ? 
(vh.NumRec + Nrec - 1) / Nrec : 0; Last = (vh.NumRec + Nrec - 1) % Nrec + 1; - + if (trace) htrc("Block=%d Last=%d\n", Block, Last); - + } // endif's CloseFileHandle(h); @@ -3297,7 +3629,7 @@ bool BGVFAM::MakeEmptyFile(PGLOBAL g, char *fn) if (h == -1) return true; - + pos = (BIGINT)n + (BIGINT)MaxBlk * (BIGINT)Blksize - (BIGINT)1; if (trace) @@ -3637,7 +3969,10 @@ int BGVFAM::WriteBuffer(PGLOBAL g) // Mode Update is done in ReadDB, we just initialize it here if (Tfile == INVALID_HANDLE_VALUE) { if (UseTemp) { - if (OpenTempFile(g)) + if ((Indxd = Tdbp->GetKindex() != NULL)) { + strcpy(g->Message, "VEC indexed udate using temp file NIY"); + return RC_FX; + } else if (OpenTempFile(g)) return RC_FX; // Most of the time, not all table columns are updated. @@ -3738,7 +4073,7 @@ int BGVFAM::DeleteRecords(PGLOBAL g, int irc) /* EOF: position Fpos at the end-of-file position. */ /*******************************************************************/ Fpos = (Block - 1) * Nrec + Last; - + if (trace) htrc("Fpos placed at file end=%d\n", Fpos); @@ -3764,12 +4099,17 @@ int BGVFAM::DeleteRecords(PGLOBAL g, int irc) Spos = Tpos = Fpos; } // endif UseTemp + Indxd = Tdbp->GetKindex() != NULL; } // endif Tpos == Spos /*********************************************************************/ /* Move any intermediate lines. */ /*********************************************************************/ - if (MoveIntermediateLines(g, &eof)) + if (Indxd) { + // Moving will be done later, must be done in sequential order + (void)AddListValue(g, TYPE_INT, &Fpos, &To_Pos); + Spos = Fpos; + } else if (MoveIntermediateLines(g, &eof)) return RC_FX; if (irc == RC_OK) { @@ -3785,6 +4125,11 @@ int BGVFAM::DeleteRecords(PGLOBAL g, int irc) /*******************************************************************/ /* Last call after EOF has been reached. 
*/ /*******************************************************************/ + if (Indxd && (Abort = MakeDeletedFile(g))) + return RC_FX; + else + Indxd = false; // Not to be redone by RenameTempFile + Block = (Tpos > 0) ? (Tpos + Nrec - 1) / Nrec : 0; Last = (Tpos + Nrec - 1) % Nrec + 1; @@ -4040,11 +4385,13 @@ bool BGVFAM::CleanUnusedSpace(PGLOBAL g) /***********************************************************************/ /* Data Base close routine for huge VEC access method. */ /***********************************************************************/ -void BGVFAM::CloseTableFile(PGLOBAL g) +void BGVFAM::CloseTableFile(PGLOBAL g, bool abort) { int rc = 0, wrc = RC_OK; MODE mode = Tdbp->GetMode(); + Abort = abort; + if (mode == MODE_INSERT) { if (Closing) wrc = RC_FX; // Last write was in error diff --git a/storage/connect/filamvct.h b/storage/connect/filamvct.h index 0dd1c06ad8b..8acb62b14dc 100644 --- a/storage/connect/filamvct.h +++ b/storage/connect/filamvct.h @@ -37,9 +37,11 @@ class DllExport VCTFAM : public FIXFAM { virtual AMT GetAmType(void) {return TYPE_AM_VCT;} virtual PTXF Duplicate(PGLOBAL g) {return (PTXF)new(g) VCTFAM(this);} + virtual int GetFileLength(PGLOBAL g); // Methods virtual void Reset(void); + virtual int MaxBlkSize(PGLOBAL g, int s); virtual bool AllocateBuffer(PGLOBAL g); virtual bool InitInsert(PGLOBAL g); virtual void ResetBuffer(PGLOBAL g) {} @@ -50,8 +52,8 @@ class DllExport VCTFAM : public FIXFAM { virtual bool OpenTableFile(PGLOBAL g); virtual int ReadBuffer(PGLOBAL g); virtual int WriteBuffer(PGLOBAL g); - virtual int DeleteRecords(PGLOBAL g, int irc); - virtual void CloseTableFile(PGLOBAL g); + virtual int DeleteRecords(PGLOBAL g, int irc); + virtual void CloseTableFile(PGLOBAL g, bool abort); virtual void Rewind(void); // Specific functions @@ -59,19 +61,20 @@ class DllExport VCTFAM : public FIXFAM { virtual bool WriteBlock(PGLOBAL g, PVCTCOL colp); protected: - virtual bool MakeEmptyFile(PGLOBAL g, char *fn); + virtual bool 
MakeEmptyFile(PGLOBAL g, char *fn); virtual bool OpenTempFile(PGLOBAL g); virtual bool MoveLines(PGLOBAL g) {return false;} virtual bool MoveIntermediateLines(PGLOBAL g, bool *b = NULL); virtual bool CleanUnusedSpace(PGLOBAL g); - virtual int GetBlockInfo(PGLOBAL g); - virtual bool SetBlockInfo(PGLOBAL g); + virtual bool MakeDeletedFile(PGLOBAL g); + virtual int GetBlockInfo(PGLOBAL g); + virtual bool SetBlockInfo(PGLOBAL g); bool ResetTableSize(PGLOBAL g, int block, int last); // Members char *NewBlock; // To block written on Insert - char *Colfn; // Pattern for column file names (VER) - char *Tempat; // Pattern for temp file names (VER) + char *Colfn; // Pattern for column file names (VEC) + char *Tempat; // Pattern for temp file names (VEC) int *Clens; // Pointer to col size array int *Deplac; // Pointer to col start position array bool *Isnum; // Pointer to buffer type isnum result @@ -107,10 +110,13 @@ class DllExport VCMFAM : public VCTFAM { // Database routines virtual bool OpenTableFile(PGLOBAL g); virtual int WriteBuffer(PGLOBAL g); - virtual int DeleteRecords(PGLOBAL g, int irc); - virtual void CloseTableFile(PGLOBAL g); + virtual int DeleteRecords(PGLOBAL g, int irc); + virtual void CloseTableFile(PGLOBAL g, bool abort); + protected: // Specific functions + virtual bool MoveIntermediateLines(PGLOBAL g, bool *b = NULL); + virtual bool MakeDeletedFile(PGLOBAL g); virtual bool ReadBlock(PGLOBAL g, PVCTCOL colp); virtual bool WriteBlock(PGLOBAL g, PVCTCOL colp); @@ -144,18 +150,19 @@ class DllExport VECFAM : public VCTFAM { // Database routines virtual bool OpenTableFile(PGLOBAL g); virtual int WriteBuffer(PGLOBAL g); - virtual int DeleteRecords(PGLOBAL g, int irc); - virtual void CloseTableFile(PGLOBAL g); + virtual int DeleteRecords(PGLOBAL g, int irc); + virtual void CloseTableFile(PGLOBAL g, bool abort); // Specific functions virtual bool ReadBlock(PGLOBAL g, PVCTCOL colp); virtual bool WriteBlock(PGLOBAL g, PVCTCOL colp); protected: - virtual bool 
OpenTempFile(PGLOBAL g); + virtual bool OpenTempFile(PGLOBAL g); virtual bool MoveLines(PGLOBAL g); virtual bool MoveIntermediateLines(PGLOBAL g, bool *b = NULL); virtual int RenameTempFile(PGLOBAL g); + virtual bool MakeDeletedFile(PGLOBAL g); bool OpenColumnFile(PGLOBAL g, char *opmode, int i); // Members @@ -189,9 +196,10 @@ class DllExport VMPFAM : public VCMFAM { // Database routines virtual bool OpenTableFile(PGLOBAL g); virtual int DeleteRecords(PGLOBAL g, int irc); - virtual void CloseTableFile(PGLOBAL g); + virtual void CloseTableFile(PGLOBAL g, bool abort); protected: + virtual bool MakeDeletedFile(PGLOBAL g); bool MapColumnFile(PGLOBAL g, MODE mode, int i); // Members @@ -220,7 +228,7 @@ class BGVFAM : public VCTFAM { virtual bool OpenTableFile(PGLOBAL g); virtual int WriteBuffer(PGLOBAL g); virtual int DeleteRecords(PGLOBAL g, int irc); - virtual void CloseTableFile(PGLOBAL g); + virtual void CloseTableFile(PGLOBAL g, bool abort); virtual void Rewind(void); // Specific functions diff --git a/storage/connect/filamzip.cpp b/storage/connect/filamzip.cpp index 0ec9e65c17e..8473011ab8b 100644 --- a/storage/connect/filamzip.cpp +++ b/storage/connect/filamzip.cpp @@ -1,11 +1,11 @@ /*********** File AM Zip C++ Program Source Code File (.CPP) ***********/ /* PROGRAM NAME: FILAMZIP */ /* ------------- */ -/* Version 1.4 */ +/* Version 1.5 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 2005-2013 */ +/* (C) Copyright to the author Olivier BERTRAND 2005-2014 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -306,10 +306,27 @@ int ZIPFAM::ReadBuffer(PGLOBAL g) /*******************************************************************/ /* Record file position in case of UPDATE or DELETE. 
*/ /*******************************************************************/ + next: if (RecordPos(g)) return RC_FX; CurBlk = Rows++; // Update RowID + + /*******************************************************************/ + /* Check whether optimization on ROWID */ + /* can be done, as well as for join as for local filtering. */ + /*******************************************************************/ + switch (Tdbp->TestBlock(g)) { + case RC_EF: + return RC_EF; + case RC_NF: + // Skip this record + if ((rc = SkipRecord(g, FALSE)) != RC_OK) + return rc; + + goto next; + } // endswitch rc + } else Placed = false; @@ -369,7 +386,7 @@ int ZIPFAM::DeleteRecords(PGLOBAL g, int irc) /***********************************************************************/ /* Data Base close routine for DOS access method. */ /***********************************************************************/ -void ZIPFAM::CloseTableFile(PGLOBAL g) +void ZIPFAM::CloseTableFile(PGLOBAL g, bool abort) { int rc = gzclose(Zfile); @@ -402,7 +419,7 @@ ZBKFAM::ZBKFAM(PDOSDEF tdp) : ZIPFAM(tdp) CurLine = NULL; NxtLine = NULL; Closing = false; - BlkPos = NULL; + BlkPos = tdp->GetTo_Pos(); } // end of ZBKFAM standard constructor ZBKFAM::ZBKFAM(PZBKFAM txfp) : ZIPFAM(txfp) @@ -413,15 +430,33 @@ ZBKFAM::ZBKFAM(PZBKFAM txfp) : ZIPFAM(txfp) } // end of ZBKFAM copy constructor /***********************************************************************/ +/* Use BlockTest to reduce the table estimated size. */ +/***********************************************************************/ +int ZBKFAM::MaxBlkSize(PGLOBAL g, int s) + { + int rc = RC_OK, savcur = CurBlk; + int size; + + // Roughly estimate the table size as the sum of blocks + // that can contain good rows + for (size = 0, CurBlk = 0; CurBlk < Block; CurBlk++) + if ((rc = Tdbp->TestBlock(g)) == RC_OK) + size += (CurBlk == Block - 1) ? 
Last : Nrec; + else if (rc == RC_EF) + break; + + CurBlk = savcur; + return size; + } // end of MaxBlkSize + +/***********************************************************************/ /* ZBK Cardinality: returns table cardinality in number of rows. */ /* This function can be called with a null argument to test the */ /* availability of Cardinality implementation (1 yes, 0 no). */ /***********************************************************************/ int ZBKFAM::Cardinality(PGLOBAL g) { - // Should not be called in this version - return (g) ? -1 : 0; -//return (g) ? (int)((Block - 1) * Nrec + Last) : 1; + return (g) ? (int)((Block - 1) * Nrec + Last) : 1; } // end of Cardinality /***********************************************************************/ @@ -491,8 +526,80 @@ int ZBKFAM::SkipRecord(PGLOBAL g, bool header) /***********************************************************************/ int ZBKFAM::ReadBuffer(PGLOBAL g) { - strcpy(g->Message, "This AM cannot be used in this version"); - return RC_FX; + int n, skip, rc = RC_OK; + + /*********************************************************************/ + /* Sequential reading when Placed is not true. */ + /*********************************************************************/ + if (++CurNum < Rbuf) { + CurLine = NxtLine; + + // Get the position of the next line in the buffer + while (*NxtLine++ != '\n') ; + + // Set caller line buffer + n = NxtLine - CurLine - Ending; + memcpy(Tdbp->GetLine(), CurLine, n); + Tdbp->GetLine()[n] = '\0'; + return RC_OK; + } else if (Rbuf < Nrec && CurBlk != -1) + return RC_EF; + + /*********************************************************************/ + /* New block. 
*/ + /*********************************************************************/ + CurNum = 0; + skip = 0; + + next: + if (++CurBlk >= Block) + return RC_EF; + + /*********************************************************************/ + /* Before using the new block, check whether block optimization */ + /* can be done, as well as for join as for local filtering. */ + /*********************************************************************/ + switch (Tdbp->TestBlock(g)) { + case RC_EF: + return RC_EF; + case RC_NF: + skip++; + goto next; + } // endswitch rc + + if (skip) + // Skip blocks rejected by block optimization + for (int i = CurBlk - skip; i < CurBlk; i++) { + BlkLen = BlkPos[i + 1] - BlkPos[i]; + + if (gzseek(Zfile, (z_off_t)BlkLen, SEEK_CUR) < 0) + return Zerror(g); + + } // endfor i + + BlkLen = BlkPos[CurBlk + 1] - BlkPos[CurBlk]; + + if (!(n = gzread(Zfile, To_Buf, BlkLen))) { + rc = RC_EF; + } else if (n > 0) { + // Get the position of the current line + CurLine = To_Buf; + + // Now get the position of the next line + for (NxtLine = CurLine; *NxtLine++ != '\n';) ; + + // Set caller line buffer + n = NxtLine - CurLine - Ending; + memcpy(Tdbp->GetLine(), CurLine, n); + Tdbp->GetLine()[n] = '\0'; + Rbuf = (CurBlk == Block - 1) ? Last : Nrec; + IsRead = true; + rc = RC_OK; + num_read++; + } else + rc = Zerror(g); + + return rc; } // end of ReadBuffer /***********************************************************************/ @@ -562,7 +669,7 @@ int ZBKFAM::DeleteRecords(PGLOBAL g, int irc) /***********************************************************************/ /* Data Base close routine for ZBK access method. 
*/ /***********************************************************************/ -void ZBKFAM::CloseTableFile(PGLOBAL g) +void ZBKFAM::CloseTableFile(PGLOBAL g, bool abort) { int rc = RC_OK; @@ -701,6 +808,32 @@ int ZIXFAM::ReadBuffer(PGLOBAL g) CurNum = 0; Tdbp->SetLine(To_Buf); + int skip = 0; + + next: + if (++CurBlk >= Block) + return RC_EF; + + /*********************************************************************/ + /* Before using the new block, check whether block optimization */ + /* can be done, as well as for join as for local filtering. */ + /*********************************************************************/ + switch (Tdbp->TestBlock(g)) { + case RC_EF: + return RC_EF; + case RC_NF: + skip++; + goto next; + } // endswitch rc + + if (skip) + // Skip blocks rejected by block optimization + for (int i = 0; i < skip; i++) { + if (gzseek(Zfile, (z_off_t)Buflen, SEEK_CUR) < 0) + return Zerror(g); + + } // endfor i + if (!(n = gzread(Zfile, To_Buf, Buflen))) { rc = RC_EF; } else if (n > 0) { @@ -746,4 +879,544 @@ int ZIXFAM::WriteBuffer(PGLOBAL g) return RC_OK; } // end of WriteBuffer +/* --------------------------- Class ZLBFAM -------------------------- */ + +/***********************************************************************/ +/* Constructors. */ +/***********************************************************************/ +ZLBFAM::ZLBFAM(PDOSDEF tdp) : BLKFAM(tdp) + { + Zstream = NULL; + Zbuffer = NULL; + Zlenp = NULL; + Optimized = tdp->IsOptimized(); + } // end of ZLBFAM standard constructor + +ZLBFAM::ZLBFAM(PZLBFAM txfp) : BLKFAM(txfp) + { + Zstream = txfp->Zstream; + Zbuffer = txfp->Zbuffer; + Zlenp = txfp->Zlenp; + Optimized = txfp->Optimized; + } // end of ZLBFAM (dummy?) copy constructor + +/***********************************************************************/ +/* ZLB GetFileLength: returns an estimate of what would be the */ +/* uncompressed file size in number of bytes. 
*/ +/***********************************************************************/ +int ZLBFAM::GetFileLength(PGLOBAL g) + { + int len = (Optimized) ? BlkPos[Block] : BLKFAM::GetFileLength(g); + + if (len > 0) + // Estimate size reduction to a max of 5 + len *= 5; + + return len; + } // end of GetFileLength + +/***********************************************************************/ +/* Allocate the line buffer. For mode Delete a bigger buffer has to */ +/* be allocated because is it also used to move lines into the file. */ +/***********************************************************************/ +bool ZLBFAM::AllocateBuffer(PGLOBAL g) + { + char *msg; + int n, zrc; + +#if 0 + if (!Optimized && Tdbp->NeedIndexing(g)) { + strcpy(g->Message, MSG(NOP_ZLIB_INDEX)); + return TRUE; + } // endif indexing +#endif // 0 + +#if defined(NOLIB) + if (!zlib && LoadZlib()) { + sprintf(g->Message, MSG(DLL_LOAD_ERROR), GetLastError(), "zlib.dll"); + return TRUE; + } // endif zlib +#endif + + BLKFAM::AllocateBuffer(g); +//Buflen = Nrec * (Lrecl + 2); +//Rbuf = Nrec; + + // Allocate the compressed buffer + n = Buflen + 16; // ????????????????????????????????? 
+ Zlenp = (int*)PlugSubAlloc(g, NULL, n); + Zbuffer = (Byte*)(Zlenp + 1); + + // Allocate and initialize the Z stream + Zstream = (z_streamp)PlugSubAlloc(g, NULL, sizeof(z_stream)); + Zstream->zalloc = (alloc_func)0; + Zstream->zfree = (free_func)0; + Zstream->opaque = (voidpf)0; + Zstream->next_in = NULL; + Zstream->avail_in = 0; + + if (Tdbp->GetMode() == MODE_READ) { + msg = "inflateInit"; + zrc = inflateInit(Zstream); + } else { + msg = "deflateInit"; + zrc = deflateInit(Zstream, Z_DEFAULT_COMPRESSION); + } // endif Mode + + if (zrc != Z_OK) { + if (Zstream->msg) + sprintf(g->Message, "%s error: %s", msg, Zstream->msg); + else + sprintf(g->Message, "%s error: %d", msg, zrc); + + return TRUE; + } // endif zrc + + if (Tdbp->GetMode() == MODE_INSERT) { + // Write the file header block + if (Last == Nrec) { + CurBlk = Block; + CurNum = 0; + + if (!GetFileLength(g)) { + // Write the zlib header as an extra block + strcpy(To_Buf, "PlugDB"); + BlkLen = strlen("PlugDB") + 1; + + if (WriteCompressedBuffer(g)) + return TRUE; + + } // endif void file + + } else { + // In mode insert, if Last != Nrec, last block must be updated + CurBlk = Block - 1; + CurNum = Last; + + strcpy(g->Message, MSG(NO_PAR_BLK_INS)); + return TRUE; + } // endif Last + + } else { // MODE_READ + // First thing to do is to read the header block + void *rdbuf; + + if (Optimized) { + BlkLen = BlkPos[0]; + rdbuf = Zlenp; + } else { + // Get the stored length from the file itself + if (fread(Zlenp, sizeof(int), 1, Stream) != 1) + return FALSE; // Empty file + + BlkLen = *Zlenp; + rdbuf = Zbuffer; + } // endif Optimized + + switch (ReadCompressedBuffer(g, rdbuf)) { + case RC_EF: + return FALSE; + case RC_FX: +#if defined(UNIX) + sprintf(g->Message, MSG(READ_ERROR), To_File, strerror(errno)); +#else + sprintf(g->Message, MSG(READ_ERROR), To_File, _strerror(NULL)); +#endif + case RC_NF: + return TRUE; + } // endswitch + + // Some old tables can have PlugDB in their header + if (strcmp(To_Buf, "PlugDB")) { 
+ sprintf(g->Message, MSG(BAD_HEADER), Tdbp->GetFile(g)); + return TRUE; + } // endif strcmp + + } // endif Mode + + return FALSE; + } // end of AllocateBuffer + +/***********************************************************************/ +/* GetPos: return the position of last read record. */ +/***********************************************************************/ +int ZLBFAM::GetPos(void) + { + return (Optimized) ? (CurNum + Nrec * CurBlk) : Fpos; + } // end of GetPos + +/***********************************************************************/ +/* GetNextPos: should not be called for this class. */ +/***********************************************************************/ +int ZLBFAM::GetNextPos(void) + { + if (Optimized) { + assert(FALSE); + return 0; + } else + return ftell(Stream); + + } // end of GetNextPos + +/***********************************************************************/ +/* SetPos: Replace the table at the specified position. */ +/***********************************************************************/ +bool ZLBFAM::SetPos(PGLOBAL g, int pos) + { + sprintf(g->Message, MSG(NO_SETPOS_YET), "ZIP"); + return true; +#if 0 // All this must be checked + if (pos < 0) { + strcpy(g->Message, MSG(INV_REC_POS)); + return true; + } // endif recpos + + CurBlk = pos / Nrec; + CurNum = pos % Nrec; +#if defined(_DEBUG) + num_eq[(CurBlk == OldBlk) ? 1 : 0]++; +#endif + + // Indicate the table position was externally set + Placed = true; + return false; +#endif // 0 + } // end of SetPos + +/***********************************************************************/ +/* ReadBuffer: Read one line for a text file. */ +/***********************************************************************/ +int ZLBFAM::ReadBuffer(PGLOBAL g) + { + int n; + void *rdbuf; + + /*********************************************************************/ + /* Sequential reading when Placed is not true. 
*/ + /*********************************************************************/ + if (Placed) { + Placed = FALSE; + } else if (++CurNum < Rbuf) { + CurLine = NxtLine; + + // Get the position of the next line in the buffer + if (Tdbp->GetFtype() == RECFM_VAR) + while (*NxtLine++ != '\n') ; + else + NxtLine += Lrecl; + + // Set caller line buffer + n = NxtLine - CurLine - ((Tdbp->GetFtype() == RECFM_BIN) ? 0 : Ending); + memcpy(Tdbp->GetLine(), CurLine, n); + Tdbp->GetLine()[n] = '\0'; + return RC_OK; + } else if (Rbuf < Nrec && CurBlk != -1) { + CurNum--; // To have a correct Last value when optimizing + return RC_EF; + } else { + /*******************************************************************/ + /* New block. */ + /*******************************************************************/ + CurNum = 0; + + next: + if (++CurBlk >= Block) + return RC_EF; + + /*******************************************************************/ + /* Before reading a new block, check whether block optimization */ + /* can be done, as well as for join as for local filtering. 
*/ + /*******************************************************************/ + if (Optimized) switch (Tdbp->TestBlock(g)) { + case RC_EF: + return RC_EF; + case RC_NF: + goto next; + } // endswitch rc + + } // endif's + + if (OldBlk == CurBlk) + goto ok; // Block is already there + + if (Optimized) { + // Store the position of next block + Fpos = BlkPos[CurBlk]; + + // fseek is required only in non sequential reading + if (CurBlk != OldBlk + 1) + if (fseek(Stream, Fpos, SEEK_SET)) { + sprintf(g->Message, MSG(FSETPOS_ERROR), Fpos); + return RC_FX; + } // endif fseek + + // Calculate the length of block to read + BlkLen = BlkPos[CurBlk + 1] - Fpos; + rdbuf = Zlenp; + } else { // !Optimized + if (CurBlk != OldBlk + 1) { + strcpy(g->Message, MSG(INV_RAND_ACC)); + return RC_FX; + } else + Fpos = ftell(Stream); // Used when optimizing + + // Get the stored length from the file itself + if (fread(Zlenp, sizeof(int), 1, Stream) != 1) { + if (feof(Stream)) + return RC_EF; + + goto err; + } // endif fread + + BlkLen = *Zlenp; + rdbuf = Zbuffer; + } // endif Optimized + + // Read the next block + switch (ReadCompressedBuffer(g, rdbuf)) { + case RC_FX: goto err; + case RC_NF: return RC_FX; + case RC_EF: return RC_EF; + default: Rbuf = (CurBlk == Block - 1) ? Last : Nrec; + } // endswitch ReadCompressedBuffer + + ok: + if (Tdbp->GetFtype() == RECFM_VAR) { + int i; + + // Get the position of the current line + for (i = 0, CurLine = To_Buf; i < CurNum; i++) + while (*CurLine++ != '\n') ; // What about Unix ??? + + // Now get the position of the next line + for (NxtLine = CurLine; *NxtLine++ != '\n';) ; + + // Set caller line buffer + n = NxtLine - CurLine - Ending; + } else { + CurLine = To_Buf + CurNum * Lrecl; + NxtLine = CurLine + Lrecl; + n = Lrecl - ((Tdbp->GetFtype() == RECFM_BIN) ? 
0 : Ending); + } // endif Ftype + + memcpy(Tdbp->GetLine(), CurLine, n); + Tdbp->GetLine()[n] = '\0'; + + OldBlk = CurBlk; // Last block actually read + IsRead = TRUE; // Is read indeed + return RC_OK; + + err: +#if defined(UNIX) + sprintf(g->Message, MSG(READ_ERROR), To_File, strerror(errno)); +#else + sprintf(g->Message, MSG(READ_ERROR), To_File, _strerror(NULL)); +#endif + return RC_FX; + } // end of ReadBuffer + +/***********************************************************************/ +/* Read and decompress a block from the stream. */ +/***********************************************************************/ +int ZLBFAM::ReadCompressedBuffer(PGLOBAL g, void *rdbuf) + { + if (fread(rdbuf, 1, (size_t)BlkLen, Stream) == (unsigned)BlkLen) { + int zrc; + + num_read++; + + if (Optimized && BlkLen != signed(*Zlenp + sizeof(int))) { + sprintf(g->Message, MSG(BAD_BLK_SIZE), CurBlk + 1); + return RC_NF; + } // endif BlkLen + + // HERE WE MUST INFLATE THE BLOCK + Zstream->next_in = Zbuffer; + Zstream->avail_in = (uInt)(*Zlenp); + Zstream->next_out = (Byte*)To_Buf; + Zstream->avail_out = Buflen; + zrc = inflate(Zstream, Z_SYNC_FLUSH); + + if (zrc != Z_OK) { + if (Zstream->msg) + sprintf(g->Message, MSG(FUNC_ERR_S), "inflate", Zstream->msg); + else + sprintf(g->Message, MSG(FUNCTION_ERROR), "inflate", (int)zrc); + + return RC_NF; + } // endif zrc + + } else if (feof(Stream)) { + return RC_EF; + } else + return RC_FX; + + return RC_OK; + } // end of ReadCompressedBuffer + +/***********************************************************************/ +/* WriteBuffer: File write routine for DOS access method. */ +/* Update is directly written back into the file, */ +/* with this (fast) method, record size cannot change. 
*/ +/***********************************************************************/ +int ZLBFAM::WriteBuffer(PGLOBAL g) + { + assert (Tdbp->GetMode() == MODE_INSERT); + + /*********************************************************************/ + /* Prepare the write buffer. */ + /*********************************************************************/ + if (!Closing) { + if (Tdbp->GetFtype() == RECFM_BIN) + memcpy(CurLine, Tdbp->GetLine(), Lrecl); + else + strcat(strcpy(CurLine, Tdbp->GetLine()), CrLf); + +#if defined(_DEBUG) + if (Tdbp->GetFtype() == RECFM_FIX && + (signed)strlen(CurLine) != Lrecl + (signed)strlen(CrLf)) { + strcpy(g->Message, MSG(BAD_LINE_LEN)); + Closing = TRUE; + return RC_FX; + } // endif Lrecl +#endif // _DEBUG + } // endif Closing + + /*********************************************************************/ + /* In Insert mode, blocs are added sequentialy to the file end. */ + /*********************************************************************/ + if (++CurNum != Rbuf) { + if (Tdbp->GetFtype() == RECFM_VAR) + CurLine += strlen(CurLine); + else + CurLine += Lrecl; + + return RC_OK; // We write only full blocks + } // endif CurNum + + // HERE WE MUST DEFLATE THE BLOCK + if (Tdbp->GetFtype() == RECFM_VAR) + NxtLine = CurLine + strlen(CurLine); + else + NxtLine = CurLine + Lrecl; + + BlkLen = NxtLine - To_Buf; + + if (WriteCompressedBuffer(g)) { + Closing = TRUE; // To tell CloseDB about a Write error + return RC_FX; + } // endif WriteCompressedBuffer + + CurBlk++; + CurNum = 0; + CurLine = To_Buf; + return RC_OK; + } // end of WriteBuffer + +/***********************************************************************/ +/* Compress the buffer and write the deflated output to stream. 
*/ +/***********************************************************************/ +bool ZLBFAM::WriteCompressedBuffer(PGLOBAL g) + { + int zrc; + + Zstream->next_in = (Byte*)To_Buf; + Zstream->avail_in = (uInt)BlkLen; + Zstream->next_out = Zbuffer; + Zstream->avail_out = Buflen + 16; + Zstream->total_out = 0; + zrc = deflate(Zstream, Z_FULL_FLUSH); + + if (zrc != Z_OK) { + if (Zstream->msg) + sprintf(g->Message, MSG(FUNC_ERR_S), "deflate", Zstream->msg); + else + sprintf(g->Message, MSG(FUNCTION_ERROR), "deflate", (int)zrc); + + return TRUE; + } else + *Zlenp = Zstream->total_out; + + // Now start the writing process. + BlkLen = *Zlenp + sizeof(int); + + if (fwrite(Zlenp, 1, BlkLen, Stream) != (size_t)BlkLen) { + sprintf(g->Message, MSG(FWRITE_ERROR), strerror(errno)); + return TRUE; + } // endif size + + return FALSE; + } // end of WriteCompressedBuffer + +/***********************************************************************/ +/* Table file close routine for DOS access method. */ +/***********************************************************************/ +void ZLBFAM::CloseTableFile(PGLOBAL g, bool abort) + { + int rc = RC_OK; + + if (Tdbp->GetMode() == MODE_INSERT) { + LPCSTR name = Tdbp->GetName(); + PDOSDEF defp = (PDOSDEF)Tdbp->GetDef(); + + // Closing is True if last Write was in error + if (CurNum && !Closing) { + // Some more inserted lines remain to be written + Last = (Nrec - Rbuf) + CurNum; + Block = CurBlk + 1; + Rbuf = CurNum--; + Closing = TRUE; + rc = WriteBuffer(g); + } else if (Rbuf == Nrec) { + Last = Nrec; + Block = CurBlk; + } // endif CurNum + + if (rc != RC_FX) { + defp->SetBlock(Block); + defp->SetLast(Last); + defp->SetIntCatInfo("Blocks", Block); + defp->SetIntCatInfo("Last", Last); + } // endif + + fclose(Stream); + } else + rc = fclose(Stream); + + if (trace) + htrc("ZLB CloseTableFile: closing %s mode=%d rc=%d\n", + To_File, Tdbp->GetMode(), rc); + + Stream = NULL; // So we can know whether table is open + To_Fb->Count = 0; // Avoid double 
closing by PlugCloseAll + + if (Tdbp->GetMode() == MODE_READ) + rc = inflateEnd(Zstream); + else + rc = deflateEnd(Zstream); + + } // end of CloseTableFile + +/***********************************************************************/ +/* Rewind routine for ZLIB access method. */ +/***********************************************************************/ +void ZLBFAM::Rewind(void) + { + // We must be positioned after the header block + if (CurBlk >= 0) { // Nothing to do if no block read yet + if (!Optimized) { // If optimized, fseek will be done in ReadBuffer + rewind(Stream); + fread(Zlenp, sizeof(int), 1, Stream); + fseek(Stream, *Zlenp + sizeof(int), SEEK_SET); + OldBlk = -1; + } // endif Optimized + + CurBlk = -1; + CurNum = Rbuf; + } // endif CurBlk + +//OldBlk = -1; +//Rbuf = 0; commented out in case we reuse last read block + } // end of Rewind + /* ------------------------ End of ZipFam ---------------------------- */ diff --git a/storage/connect/filamzip.h b/storage/connect/filamzip.h index 37cc130311c..6d27cb67e81 100644 --- a/storage/connect/filamzip.h +++ b/storage/connect/filamzip.h @@ -1,169 +1,170 @@ -/************** FilAmZip H Declares Source Code File (.H) **************/ -/* Name: FILAMZIP.H Version 1.1 */ -/* */ -/* (C) Copyright to the author Olivier BERTRAND 2005-2012 */ -/* */ -/* This file contains the GZIP access method classes declares. */ -/***********************************************************************/ -#ifndef __FILAMZIP_H -#define __FILAMZIP_H - -#include "zlib.h" - -typedef class ZIPFAM *PZIPFAM; -typedef class ZBKFAM *PZBKFAM; -typedef class ZIXFAM *PZIXFAM; -typedef class ZLBFAM *PZLBFAM; - -/***********************************************************************/ -/* This is the access method class declaration for not optimized */ -/* variable record length files compressed using the gzip library */ -/* functions. File is accessed record by record (row). 
*/ -/***********************************************************************/ -class DllExport ZIPFAM : public TXTFAM { -// friend class DOSCOL; - public: - // Constructor - ZIPFAM(PDOSDEF tdp) : TXTFAM(tdp) {Zfile = NULL; Zpos = 0;} - ZIPFAM(PZIPFAM txfp); - - // Implementation - virtual AMT GetAmType(void) {return TYPE_AM_ZIP;} - virtual int GetPos(void); - virtual int GetNextPos(void); - virtual PTXF Duplicate(PGLOBAL g) - {return (PTXF)new(g) ZIPFAM(this);} - - // Methods - virtual void Reset(void); - virtual int GetFileLength(PGLOBAL g); - virtual int Cardinality(PGLOBAL g) {return (g) ? -1 : 0;} - virtual bool AllocateBuffer(PGLOBAL g); - virtual int GetRowID(void); - virtual bool RecordPos(PGLOBAL g); +/************** FilAmZip H Declares Source Code File (.H) **************/
+/* Name: FILAMZIP.H Version 1.2 */
+/* */
+/* (C) Copyright to the author Olivier BERTRAND 2005-2014 */
+/* */
+/* This file contains the GZIP access method classes declares. */
+/***********************************************************************/
+#ifndef __FILAMZIP_H
+#define __FILAMZIP_H
+
+#include "zlib.h"
+
+typedef class ZIPFAM *PZIPFAM;
+typedef class ZBKFAM *PZBKFAM;
+typedef class ZIXFAM *PZIXFAM;
+typedef class ZLBFAM *PZLBFAM;
+
+/***********************************************************************/
+/* This is the access method class declaration for not optimized */
+/* variable record length files compressed using the gzip library */
+/* functions. File is accessed record by record (row). */
+/***********************************************************************/
+class DllExport ZIPFAM : public TXTFAM {
+// friend class DOSCOL;
+ public:
+ // Constructor
+ ZIPFAM(PDOSDEF tdp) : TXTFAM(tdp) {Zfile = NULL; Zpos = 0;}
+ ZIPFAM(PZIPFAM txfp);
+
+ // Implementation
+ virtual AMT GetAmType(void) {return TYPE_AM_ZIP;}
+ virtual int GetPos(void);
+ virtual int GetNextPos(void);
+ virtual PTXF Duplicate(PGLOBAL g)
+ {return (PTXF)new(g) ZIPFAM(this);}
+
+ // Methods
+ virtual void Reset(void);
+ virtual int GetFileLength(PGLOBAL g);
+ virtual int Cardinality(PGLOBAL g) {return (g) ? -1 : 0;}
+ virtual int MaxBlkSize(PGLOBAL g, int s) {return s;}
+ virtual bool AllocateBuffer(PGLOBAL g);
+ virtual int GetRowID(void);
+ virtual bool RecordPos(PGLOBAL g);
+ virtual bool SetPos(PGLOBAL g, int recpos);
+ virtual int SkipRecord(PGLOBAL g, bool header);
+ virtual bool OpenTableFile(PGLOBAL g);
+ virtual int ReadBuffer(PGLOBAL g);
+ virtual int WriteBuffer(PGLOBAL g);
+ virtual int DeleteRecords(PGLOBAL g, int irc);
+ virtual void CloseTableFile(PGLOBAL g, bool abort);
+ virtual void Rewind(void);
+
+ protected:
+ int Zerror(PGLOBAL g); // GZ error function
+
+ // Members
+ gzFile Zfile; // Points to GZ file structure
+ z_off_t Zpos; // Uncompressed file position
+ }; // end of class ZIPFAM
+
+/***********************************************************************/
+/* This is the access method class declaration for optimized variable */
+/* record length files compressed using the gzip library functions. */
+/* The File is accessed by block (requires an opt file). */
+/***********************************************************************/
+class DllExport ZBKFAM : public ZIPFAM {
+ public:
+ // Constructor
+ ZBKFAM(PDOSDEF tdp);
+ ZBKFAM(PZBKFAM txfp);
+
+ // Implementation
+ virtual int GetPos(void);
+ virtual int GetNextPos(void) {return 0;}
+ virtual PTXF Duplicate(PGLOBAL g)
+ {return (PTXF)new(g) ZBKFAM(this);}
+
+ // Methods
+ virtual int Cardinality(PGLOBAL g);
+ virtual int MaxBlkSize(PGLOBAL g, int s);
+ virtual bool AllocateBuffer(PGLOBAL g);
+ virtual int GetRowID(void);
+ virtual bool RecordPos(PGLOBAL g);
+ virtual int SkipRecord(PGLOBAL g, bool header);
+ virtual int ReadBuffer(PGLOBAL g);
+ virtual int WriteBuffer(PGLOBAL g);
+ virtual int DeleteRecords(PGLOBAL g, int irc);
+ virtual void CloseTableFile(PGLOBAL g, bool abort);
+ virtual void Rewind(void);
+
+ protected:
+ // Members
+ char *CurLine; // Position of current line in buffer
+ char *NxtLine; // Position of Next line in buffer
+ bool Closing; // True when closing on Insert
+ }; // end of class ZBKFAM
+
+/***********************************************************************/
+/* This is the access method class declaration for fixed record */
+/* length files compressed using the gzip library functions. */
+/* The file is always accessed by block. */
+/***********************************************************************/
+class DllExport ZIXFAM : public ZBKFAM {
+ public:
+ // Constructor
+ ZIXFAM(PDOSDEF tdp);
+ ZIXFAM(PZIXFAM txfp) : ZBKFAM(txfp) {}
+
+ // Implementation
+ virtual int GetNextPos(void) {return 0;}
+ virtual PTXF Duplicate(PGLOBAL g)
+ {return (PTXF)new(g) ZIXFAM(this);}
+
+ // Methods
+ virtual int Cardinality(PGLOBAL g);
+ virtual bool AllocateBuffer(PGLOBAL g);
+ virtual int ReadBuffer(PGLOBAL g);
+ virtual int WriteBuffer(PGLOBAL g);
+
+ protected:
+ // No additional Members
+ }; // end of class ZIXFAM
+
+/***********************************************************************/
+/* This is the DOS/UNIX Access Method class declaration for PlugDB */
+/* fixed/variable files compressed using the zlib library functions. */
+/* Physically these are written and read using the same technique */
+/* than blocked variable files, only the contain of each block is */
+/* compressed using the deflate zlib function. The purpose of this */
+/* specific format is to have a fast mechanism for direct access of */
+/* records so blocked optimization is fast and direct access (joins) */
+/* is allowed. Note that the block length is written ahead of each */
+/* block to enable reading when optimization file is not available. */
+/***********************************************************************/
+class DllExport ZLBFAM : public BLKFAM {
+ public:
+ // Constructor
+ ZLBFAM(PDOSDEF tdp);
+ ZLBFAM(PZLBFAM txfp);
+
+ // Implementation
+ virtual AMT GetAmType(void) {return TYPE_AM_ZLIB;}
+ virtual int GetPos(void);
+ virtual int GetNextPos(void);
+ virtual PTXF Duplicate(PGLOBAL g)
+ {return (PTXF)new(g) ZLBFAM(this);}
+ inline void SetOptimized(bool b) {Optimized = b;}
+
+ // Methods
+ virtual int GetFileLength(PGLOBAL g);
virtual bool SetPos(PGLOBAL g, int recpos); - virtual int SkipRecord(PGLOBAL g, bool header); - virtual bool OpenTableFile(PGLOBAL g); - virtual int ReadBuffer(PGLOBAL g); - virtual int WriteBuffer(PGLOBAL g); - virtual int DeleteRecords(PGLOBAL g, int irc); - virtual void CloseTableFile(PGLOBAL g); - virtual void Rewind(void); - - protected: - int Zerror(PGLOBAL g); // GZ error function - - // Members - gzFile Zfile; // Points to GZ file structure - z_off_t Zpos; // Uncompressed file position - }; // end of class ZIPFAM - -/***********************************************************************/ -/* This is the access method class declaration for optimized variable */ -/* record length files compressed using the gzip library functions. */ -/* The File is accessed by block (requires an opt file). */ -/***********************************************************************/ -class DllExport ZBKFAM : public ZIPFAM { - public: - // Constructor - ZBKFAM(PDOSDEF tdp); - ZBKFAM(PZBKFAM txfp); - - // Implementation - virtual int GetPos(void); - virtual int GetNextPos(void) {return 0;} - virtual PTXF Duplicate(PGLOBAL g) - {return (PTXF)new(g) ZBKFAM(this);} - - // Methods - virtual int Cardinality(PGLOBAL g); - virtual bool AllocateBuffer(PGLOBAL g); - virtual int GetRowID(void); - virtual bool RecordPos(PGLOBAL g); - virtual int SkipRecord(PGLOBAL g, bool header); - virtual int ReadBuffer(PGLOBAL g); - virtual int WriteBuffer(PGLOBAL g); - virtual int DeleteRecords(PGLOBAL g, int irc); - virtual void CloseTableFile(PGLOBAL g); - virtual void Rewind(void); - - protected: - // Members - char *CurLine; // Position of current line in buffer - char *NxtLine; // Position of Next line in buffer - bool Closing; // True when closing on Insert - }; // end of class ZBKFAM - -/***********************************************************************/ -/* This is the access method class declaration for fixed record */ -/* length files compressed using the gzip library functions. 
*/ -/* The file is always accessed by block. */ -/***********************************************************************/ -class DllExport ZIXFAM : public ZBKFAM { - public: - // Constructor - ZIXFAM(PDOSDEF tdp); - ZIXFAM(PZIXFAM txfp) : ZBKFAM(txfp) {} - - // Implementation - virtual int GetNextPos(void) {return 0;} - virtual PTXF Duplicate(PGLOBAL g) - {return (PTXF)new(g) ZIXFAM(this);} - - // Methods - virtual int Cardinality(PGLOBAL g); - virtual bool AllocateBuffer(PGLOBAL g); - virtual int ReadBuffer(PGLOBAL g); - virtual int WriteBuffer(PGLOBAL g); - - protected: - // No additional Members - }; // end of class ZIXFAM - -#if 0 -/***********************************************************************/ -/* This is the DOS/UNIX Access Method class declaration for PlugDB */ -/* fixed/variable files compressed using the zlib library functions. */ -/* Physically these are written and read using the same technique */ -/* than blocked variable files, only the contain of each block is */ -/* compressed using the deflate zlib function. The purpose of this */ -/* specific format is to have a fast mechanism for direct access of */ -/* records so blocked optimization is fast and direct access (joins) */ -/* is allowed. Note that the block length is written ahead of each */ -/* block to enable reading when optimization file is not available. 
*/ -/***********************************************************************/ -class DllExport ZLBFAM : public BLKFAM { - public: - // Constructor - ZLBFAM(PDOSDEF tdp); - ZLBFAM(PZLBFAM txfp); - - // Implementation - virtual AMT GetAmType(void) {return TYPE_AM_ZLIB;} - virtual int GetPos(void); - virtual int GetNextPos(void); - virtual PTXF Duplicate(PGLOBAL g) - {return (PTXF)new(g) ZLBFAM(this);} - inline void SetOptimized(bool b) {Optimized = b;} - - // Methods - virtual int GetFileLength(PGLOBAL g); - virtual bool AllocateBuffer(PGLOBAL g); - virtual int ReadBuffer(PGLOBAL g); - virtual int WriteBuffer(PGLOBAL g); - virtual void CloseTableFile(PGLOBAL g); - virtual void Rewind(void); - - protected: - bool WriteCompressedBuffer(PGLOBAL g); - int ReadCompressedBuffer(PGLOBAL g, void *rdbuf); - - // Members - z_streamp Zstream; // Compression/decompression stream - Byte *Zbuffer; // Compressed block buffer - int *Zlenp; // Pointer to block length - bool Optimized; // true when opt file is available - }; // end of class ZLBFAM -#endif // 0 - -#endif // __FILAMZIP_H + virtual bool AllocateBuffer(PGLOBAL g);
+ virtual int ReadBuffer(PGLOBAL g);
+ virtual int WriteBuffer(PGLOBAL g);
+ virtual void CloseTableFile(PGLOBAL g, bool abort);
+ virtual void Rewind(void);
+
+ protected:
+ bool WriteCompressedBuffer(PGLOBAL g);
+ int ReadCompressedBuffer(PGLOBAL g, void *rdbuf);
+
+ // Members
+ z_streamp Zstream; // Compression/decompression stream
+ Byte *Zbuffer; // Compressed block buffer
+ int *Zlenp; // Pointer to block length
+ bool Optimized; // true when opt file is available
+ }; // end of class ZLBFAM
+
+#endif // __FILAMZIP_H
diff --git a/storage/connect/filter.cpp b/storage/connect/filter.cpp new file mode 100644 index 00000000000..9212432cdde --- /dev/null +++ b/storage/connect/filter.cpp @@ -0,0 +1,1733 @@ +/***************** Filter C++ Class Filter Code (.CPP) *****************/ +/* Name: FILTER.CPP Version 3.9 */ +/* */ +/* (C) Copyright to the author Olivier BERTRAND 1998-2014 */ +/* */ +/* This file contains the class FILTER function code. */ +/***********************************************************************/ + +/***********************************************************************/ +/* Include relevant MariaDB header file. */ +/***********************************************************************/ +#include "my_global.h" +#include "sql_class.h" +//#include "sql_time.h" + +#if defined(WIN32) +//#include <windows.h> +#else // !WIN32 +#include <string.h> +#include <sys/types.h> +#include <sys/stat.h> +#endif // !WIN32 + + +/***********************************************************************/ +/* Include required application header files */ +/* global.h is header containing all global Plug declarations. */ +/* plgdbsem.h is header containing the DB applic. declarations. */ +/* xobject.h is header containing the XOBJECT derived classes dcls. */ +/***********************************************************************/ +#include "global.h" +#include "plgdbsem.h" +#include "tabcol.h" +#include "xtable.h" +#include "array.h" +//#include "subquery.h" +#include "filter.h" +//#include "token.h" +//#include "select.h" +#include "xindex.h" + +/***********************************************************************/ +/* Static variables. */ +/***********************************************************************/ +extern "C" int trace; + +/***********************************************************************/ +/* Utility routines. 
*/ +/***********************************************************************/ +void PlugConvertConstant(PGLOBAL, void* &, short&); +//void *PlugCopyDB(PTABS, void*, INT); +void NewPointer(PTABS, void*, void*); +void AddPointer(PTABS, void*); + +static PPARM MakeParm(PGLOBAL g, PXOB xp) + { + PPARM pp = (PPARM)PlugSubAlloc(g, NULL, sizeof(PARM)); + pp->Type = TYPE_XOBJECT; + pp->Value = xp; + pp->Domain = 0; + pp->Next = NULL; + return pp; + } // end of MakeParm + +/***********************************************************************/ +/* Routines called externally by FILTER function. */ +/***********************************************************************/ +bool PlugEvalLike(PGLOBAL, LPCSTR, LPCSTR, bool); +//bool ReadSubQuery(PGLOBAL, PSUBQ); +//PSUBQ OpenSubQuery(PGLOBAL, PSQL); +//void PlugCloseDB(PGLOBAL, PSQL); +BYTE OpBmp(PGLOBAL g, OPVAL opc); +PARRAY MakeValueArray(PGLOBAL g, PPARM pp); + +/***********************************************************************/ +/* Routines called externally by CondFilter. */ +/***********************************************************************/ +PFIL MakeFilter(PGLOBAL g, PFIL fp1, OPVAL vop, PFIL fp2) + { + PFIL filp = new(g) FILTER(g, vop); + + filp->Arg(0) = fp1; + filp->Arg(1) = fp2; + + if (filp->Convert(g, false)) + return NULL; + + return filp; + } // end of MakeFilter + +PFIL MakeFilter(PGLOBAL g, PCOL *colp, POPER pop, PPARM pfirst, bool neg) +{ + PPARM parmp, pp[2]; + PFIL fp1, fp2, filp = NULL; + + if (pop->Val == OP_IN) { + PARRAY par = MakeValueArray(g, pfirst); + + if (par) { + pp[0] = MakeParm(g, colp[0]); + pp[1] = MakeParm(g, par); + fp1 = new(g) FILTER(g, pop, pp); + + if (fp1->Convert(g, false)) + return NULL; + + filp = (neg) ? MakeFilter(g, fp1, OP_NOT, NULL) : fp1; + } // endif par + + } else if (pop->Val == OP_XX) { // BETWEEN + if (pfirst && pfirst->Next) { + pp[0] = MakeParm(g, colp[0]); + pp[1] = pfirst; + fp1 = new(g) FILTER(g, neg ? 
OP_LT : OP_GE, pp); + + if (fp1->Convert(g, false)) + return NULL; + + pp[1] = pfirst->Next; + fp2 = new(g) FILTER(g, neg ? OP_GT : OP_LE, pp); + + if (fp2->Convert(g, false)) + return NULL; + + filp = MakeFilter(g, fp1, neg ? OP_OR : OP_AND, fp2); + } // endif parmp + + } else { + parmp = pfirst; + + for (int i = 0; i < 2; i++) + if (colp[i]) { + pp[i] = MakeParm(g, colp[i]); + } else { + if (!parmp || parmp->Domain != i) + return NULL; // Logical error, should never happen + + pp[i] = parmp; + parmp = parmp->Next; + } // endif colp + + filp = new(g) FILTER(g, pop, pp); + + if (filp->Convert(g, false)) + return NULL; + + } // endif's Val + + return filp; +} // end of MakeFilter + +/* --------------------------- Class FILTER -------------------------- */ + +/***********************************************************************/ +/* FILTER public constructors. */ +/***********************************************************************/ +FILTER::FILTER(PGLOBAL g, POPER pop, PPARM *tp) + { + Constr(g, pop->Val, pop->Mod, tp); + } // end of FILTER constructor + +FILTER::FILTER(PGLOBAL g, OPVAL opc, PPARM *tp) + { + Constr(g, opc, 0, tp); + } // end of FILTER constructor + +void FILTER::Constr(PGLOBAL g, OPVAL opc, int opm, PPARM *tp) + { + Next = NULL; + Opc = opc; + Opm = opm; + Bt = 0x00; + + for (int i = 0; i < 2; i++) { + Test[i].B_T = TYPE_VOID; + + if (tp && tp[i]) { + PlugConvertConstant(g, tp[i]->Value, tp[i]->Type); +#if defined(_DEBUG) + assert(tp[i]->Type == TYPE_XOBJECT); +#endif + Arg(i) = (PXOB)tp[i]->Value; + } else + Arg(i) = pXVOID; + + Val(i) = NULL; + Test[i].Conv = FALSE; + } // endfor i + + } // end of Constr + +/***********************************************************************/ +/* FILTER copy constructor. 
*/ +/***********************************************************************/ +FILTER::FILTER(PFIL fil1) + { + Next = NULL; + Opc = fil1->Opc; + Opm = fil1->Opm; + Test[0] = fil1->Test[0]; + Test[1] = fil1->Test[1]; + } // end of FILTER copy constructor + +#if 0 +/***********************************************************************/ +/* Linearize: Does the linearization of the filter tree: */ +/* Independent filters (not implied in OR/NOT) will be separated */ +/* from others and filtering operations will be automated by */ +/* making a list of filter operations in polish operation style. */ +/* Returned value points to the first filter of the list, which ends */ +/* with the filter that was pointed by the first call argument, */ +/* except for separators, in which case a loop is needed to find it. */ +/* Note: a loop is used now in all cases (was not for OP_NOT) to be */ +/* able to handle the case of filters whose arguments are already */ +/* linearized, as it is done in LNA semantic routines. Indeed for */ +/* already linearized chains, the first filter is never an OP_AND, */ +/* OP_OR or OP_NOT filter, so this function just returns 'this'. 
*/ +/***********************************************************************/ +PFIL FILTER::Linearize(bool nosep) + { + int i; + PFIL lfp[2], ffp[2] = {NULL,NULL}; + + switch (Opc) { + case OP_NOT: + if (GetArgType(0) == TYPE_FILTER) { + lfp[0] = (PFIL)Arg(0); + ffp[0] = lfp[0]->Linearize(TRUE); + } /* endif */ + + if (!ffp[0]) + return NULL; + + while (lfp[0]->Next) // See Note above + lfp[0] = lfp[0]->Next; + + Arg(0) = lfp[0]; + lfp[0]->Next = this; + break; + case OP_OR: + nosep = TRUE; + case OP_AND: + for (i = 0; i < 2; i++) { + if (GetArgType(i) == TYPE_FILTER) { + lfp[i] = (PFIL)Arg(i); + ffp[i] = lfp[i]->Linearize(nosep); + } /* endif */ + + if (!ffp[i]) + return NULL; + + while (lfp[i]->Next) + lfp[i] = lfp[i]->Next; + + Arg(i) = lfp[i]; + } /* endfor i */ + + if (nosep) { + lfp[0]->Next = ffp[1]; + lfp[1]->Next = this; + } else { + lfp[0]->Next = this; + Opc = OP_SEP; + Arg(1) = pXVOID; + Next = ffp[1]; + } /* endif */ + + break; + default: + ffp[0] = this; + } /* endswitch */ + + return (ffp[0]); + } // end of Linearize + +/***********************************************************************/ +/* Link the fil2 filter chain to the fil1(this) filter chain. */ +/***********************************************************************/ +PFIL FILTER::Link(PGLOBAL g, PFIL fil2) + { + PFIL fil1; + + if (trace) + htrc("Linking filter %p with op=%d... to filter %p with op=%d\n", + this, Opc, fil2, (fil2) ? fil2->Opc : 0); + + for (fil1 = this; fil1->Next; fil1 = fil1->Next) ; + + if (fil1->Opc == OP_SEP) + fil1->Next = fil2; // Separator already exists + else { + // Create a filter separator and insert it between the chains + PFIL filp = new(g) FILTER(g, OP_SEP); + + filp->Arg(0) = fil1; + filp->Next = fil2; + fil1->Next = filp; + } // endelse + + return (this); + } // end of Link + +/***********************************************************************/ +/* Remove eventual last separator from a filter chain. 
*/ +/***********************************************************************/ +PFIL FILTER::RemoveLastSep(void) + { + PFIL filp, gfp = NULL; + + // Find last filter block (filp) and previous one (gfp). + for (filp = this; filp->Next; filp = filp->Next) + gfp = filp; + + // If last filter is a separator, remove it + if (filp->Opc == OP_SEP) + if (gfp) + gfp->Next = NULL; + else + return NULL; // chain is now empty + + return this; + } // end of RemoveLastSep + +/***********************************************************************/ +/* CheckColumn: Checks references to Columns in the filter and change */ +/* them into references to Col Blocks. */ +/* Returns the number of column references or -1 in case of column */ +/* not found and -2 in case of unrecoverable error. */ +/* WHERE filters are called with *aggreg == AGG_NO. */ +/* HAVING filters are called with *aggreg == AGG_ANY. */ +/***********************************************************************/ +int FILTER::CheckColumn(PGLOBAL g, PSQL sqlp, PXOB &p, int &ag) + { + char errmsg[MAX_STR] = ""; + int agg, k, n = 0; + + if (trace) + htrc("FILTER CheckColumn: sqlp=%p ag=%d\n", sqlp, ag); + + switch (Opc) { + case OP_SEP: + case OP_AND: + case OP_OR: + case OP_NOT: + return 0; // This because we are called for a linearized filter + default: + break; + } // endswitch Opc + + // Check all arguments even in case of error for when we are called + // from CheckHaving, where references to an alias raise an error but + // we must have all other arguments to be set. 
+ for (int i = 0; i < 2; i++) { + if (GetArgType(i) == TYPE_FILTER) // Should never happen in + return 0; // current implementation + + agg = ag; + + if ((k = Arg(i)->CheckColumn(g, sqlp, Arg(i), agg)) < -1) { + return k; + } else if (k < 0) { + if (!*errmsg) // Keep first error message + strcpy(errmsg, g->Message); + + } else + n += k; + + } // endfor i + + if (*errmsg) { + strcpy(g->Message, errmsg); + return -1; + } else + return n; + + } // end of CheckColumn + +/***********************************************************************/ +/* RefNum: Find the number of references correlated sub-queries make */ +/* to the columns of the outer query (pointed by sqlp). */ +/***********************************************************************/ +int FILTER::RefNum(PSQL sqlp) + { + int n = 0; + + for (int i = 0; i < 2; i++) + n += Arg(i)->RefNum(sqlp); + + return n; + } // end of RefNum + +/***********************************************************************/ +/* CheckSubQuery: see SUBQUERY::CheckSubQuery for comment. */ +/***********************************************************************/ +PXOB FILTER::CheckSubQuery(PGLOBAL g, PSQL sqlp) + { + switch (Opc) { + case OP_SEP: + case OP_AND: + case OP_OR: + case OP_NOT: + break; + default: + for (int i = 0; i < 2; i++) + if (!(Arg(i) = (PXOB)Arg(i)->CheckSubQuery(g, sqlp))) + return NULL; + + break; + } // endswitch Opc + + return this; + } // end of CheckSubQuery + +/***********************************************************************/ +/* SortJoin: function that places ahead of the list the 'good' groups */ +/* for join filtering. These are groups with only one filter that */ +/* specify equality between two different table columns, at least */ +/* one is a table key column. Doing so the join filter will be in */ +/* general compatible with linearization of the joined table tree. */ +/* This function has been added a further sorting on column indexing. 
*/ +/***********************************************************************/ +PFIL FILTER::SortJoin(PGLOBAL g) + { + int k; + PCOL cp1, cp2; + PTDBASE tp1, tp2; + PFIL fp, filp, gfp, filstart = this, filjoin = NULL, lfp = NULL; + bool join = TRUE, key = TRUE; + + // This routine requires that the chain ends with a separator + // So check for it and eventually add one if necessary + for (filp = this; filp->Next; filp = filp->Next) ; + + if (filp->Opc != OP_SEP) + filp->Next = new(g) FILTER(g, OP_SEP); + + again: + for (k = (key) ? 0 : MAX_MULT_KEY; k <= MAX_MULT_KEY; k++) + for (gfp = NULL, fp = filp = filstart; filp; filp = filp->Next) + switch (filp->Opc) { + case OP_SEP: + if (join) { + // Put this filter group into the join filter group list. + if (!lfp) + filjoin = fp; + else + lfp->Next = fp; + + if (!gfp) + filstart = filp->Next; + else + gfp->Next = filp->Next; + + lfp = filp; // last block of join filter list + } else + gfp = filp; // last block of bad filter list + + join = TRUE; + fp = filp->Next; + break; + case OP_LOJ: + case OP_ROJ: + case OP_DTJ: + join &= TRUE; + break; + case OP_EQ: + if (join && k > 0 // So specific join operators come first + && filp->GetArgType(0) == TYPE_COLBLK + && filp->GetArgType(1) == TYPE_COLBLK) { + cp1 = (PCOL)filp->Arg(0); + cp2 = (PCOL)filp->Arg(1); + tp1 = (PTDBASE)cp1->GetTo_Tdb(); + tp2 = (PTDBASE)cp2->GetTo_Tdb(); + + if (tp1->GetTdb_No() != tp2->GetTdb_No()) { + if (key) + join &= (cp1->GetKey() == k || cp2->GetKey() == k); + else + join &= (tp1->GetColIndex(cp1) || tp2->GetColIndex(cp2)); + + } else + join = FALSE; + + } else + join = FALSE; + + break; + default: + join = FALSE; + } // endswitch filp->Opc + + if (key) { + key = FALSE; + goto again; + } // endif key + + if (filjoin) { + lfp->Next = filstart; + filstart = filjoin; + } // endif filjoin + + // Removing last separator is perhaps unuseful, but it was so + return filstart->RemoveLastSep(); + } // end of SortJoin + 
+/***********************************************************************/ +/* Check that this filter is a good join filter. */ +/* If so the opj block will be set accordingly. */ +/* opj points to the join block, fprec to the filter block to which */ +/* the rest of the chain must be linked in case of success. */ +/* teq, tek and tk2 indicates the severity of the tests: */ +/* tk2 == TRUE means both columns must be primary keys. */ +/* tc2 == TRUE means both args must be columns (not expression). */ +/* tek == TRUE means at least one column must be a primary key. */ +/* teq == TRUE means the filter operator must be OP_EQ. */ +/* tix == TRUE means at least one column must be a simple index key. */ +/* thx == TRUE means at least one column must be a leading index key. */ +/***********************************************************************/ +bool FILTER::FindJoinFilter(POPJOIN opj, PFIL fprec, bool teq, bool tek, + bool tk2, bool tc2, bool tix, bool thx) + { + if (trace) + htrc("FindJoinFilter: opj=%p fprec=%p tests=(%d,%d,%d,%d)\n", + opj, fprec, teq, tek, tk2, tc2); + + // Firstly check that this filter is an independent filter + // meaning that it is the only one in its own group. + if (Next && Next->Opc != OP_SEP) + return (Opc < 0); + + // Keep only equi-joins and specific joins (Outer and Distinct) + // Normally specific join operators comme first because they have + // been placed first by SortJoin. + if (teq && Opc > OP_EQ) + return FALSE; + + // We have a candidate for join filter, now check that it + // fulfil the requirement about its operands, to point to + // columns of respectively the two TDB's of that join. 
+ int col1 = 0, col2 = 0; + bool key = tk2; + bool idx = FALSE, ihx = FALSE; + PIXDEF pdx; + + for (int i = 0; i < 2; i++) + if (GetArgType(i) == TYPE_COLBLK) { + PCOL colp = (PCOL)Arg(i); + + if (tk2) + key &= (colp->IsKey()); + else + key |= (colp->IsKey()); + + pdx = ((PTDBASE)colp->GetTo_Tdb())->GetColIndex(colp); + idx |= (pdx && pdx->GetNparts() == 1); + ihx |= (pdx != NULL); + + if (colp->VerifyColumn(opj->GetTbx1())) + col1 = i + 1; + else if (colp->VerifyColumn(opj->GetTbx2())) + col2 = i + 1; + + } else if (!tc2 && GetArgType(i) != TYPE_CONST) { + PXOB xp = Arg(i); + + if (xp->VerifyColumn(opj->GetTbx1())) + col1 = i + 1; + else if (xp->VerifyColumn(opj->GetTbx2())) + col2 = i + 1; + + } else + return (Opc < 0); + + if (col1 == 0 || col2 == 0) + return (Opc < 0); + + if (((tek && !key) || (tix && !idx) || (thx && !ihx)) && Opc != OP_DTJ) + return FALSE; + + // This is the join filter, set the join block. + if (col1 == 1) { + opj->SetCol1(Arg(0)); + opj->SetCol2(Arg(1)); + } else { + opj->SetCol1(Arg(1)); + opj->SetCol2(Arg(0)); + + switch (Opc) { +// case OP_GT: Opc = OP_LT; break; +// case OP_LT: Opc = OP_GT; break; +// case OP_GE: Opc = OP_LE; break; +// case OP_LE: Opc = OP_GE; break; + case OP_LOJ: + case OP_ROJ: + case OP_DTJ: + // For expended join operators, the filter must indicate + // the way the join should be done, and not the order of + // appearance of tables in the table list (which is kept + // because tables are sorted in AddTdb). Therefore the + // join is inversed, not the filter. 
+ opj->InverseJoin(); + default: break; + } // endswitch Opc + + } // endif col1 + + if (Opc < 0) { + // For join operators, special processing is needed + int knum = 0; + PFIL fp; + + switch (Opc) { + case OP_LOJ: + opj->SetJtype(JT_LEFT); + knum = opj->GetCol2()->GetKey(); + break; + case OP_ROJ: + opj->SetJtype(JT_RIGHT); + knum = opj->GetCol1()->GetKey(); + break; + case OP_DTJ: + for (knum = 1, fp = this->Next; fp; fp = fp->Next) + if (fp->Opc == OP_DTJ) + knum++; + else if (fp->Opc != OP_SEP) + break; + + opj->SetJtype(JT_DISTINCT); + opj->GetCol2()->SetKey(knum); + break; + default: + break; + } // endswitch Opc + + if (knum > 1) { + // Lets take care of a multiple key join + // We do a minimum of checking here as it will done later + int k = 1; + OPVAL op; + BYTE tmp[sizeof(Test[0])]; + + for (fp = this->Next; k < knum && fp; fp = fp->Next) { + switch (op = fp->Opc) { + case OP_SEP: + continue; + case OP_LOJ: + if (Opc == OP_ROJ) { + op = Opc; + memcpy(tmp, &fp->Test[0], sizeof(Test[0])); + fp->Test[0] = fp->Test[1]; + memcpy(&fp->Test[1], tmp, sizeof(Test[0])); + } // endif Opc + + k++; + break; + case OP_ROJ: + if (Opc == OP_LOJ) { + op = Opc; + memcpy(tmp, &fp->Test[0], sizeof(Test[0])); + fp->Test[0] = fp->Test[1]; + memcpy(&fp->Test[1], tmp, sizeof(Test[0])); + } // endif Opc + + k++; + break; + case OP_DTJ: + if (op == Opc && fp->GetArgType(1) == TYPE_COLBLK) + ((PCOL)fp->Arg(1))->SetKey(knum); + + k++; + break; + default: + break; + } // endswitch op + + if (op != Opc) + return TRUE; + + fp->Opc = OP_EQ; + } // endfor fp + + } // endif k + + Opc = OP_EQ; + } // endif Opc + + // Set the join filter operator + opj->SetOpc(Opc); + + // Now mark the columns involved in the join filter because + // this information will be used by the linearize program. + // Note: this should be replaced in the future by something + // enabling to mark tables as Parent or Child. 
+ opj->GetCol1()->MarkCol(U_J_EXT); + opj->GetCol2()->MarkCol(U_J_EXT); + + // Remove the filter from the filter chain. If the filter is + // not last in the chain, also remove the SEP filter after it. + if (Next) // Next->Opc == OP_SEP + Next = Next->Next; + + if (!fprec) + opj->SetFilter(Next); + else + fprec->Next = Next; + + return FALSE; + } // end of FindJoinFilter + +/***********************************************************************/ +/* CheckHaving: check and process a filter of an HAVING clause. */ +/* Check references to Columns and Functions in the filter. */ +/* All these references can correspond to items existing in the */ +/* SELECT list, else if it is a function, allocate a SELECT block */ +/* to be added to the To_Sel list (non projected blocks). */ +/***********************************************************************/ +bool FILTER::CheckHaving(PGLOBAL g, PSQL sqlp) + { + int agg = AGG_ANY; + PXOB xp; + +//sqlp->SetOk(TRUE); // Ok to look into outer queries for filters + + switch (Opc) { + case OP_SEP: + case OP_AND: + case OP_OR: + case OP_NOT: + return FALSE; + default: + if (CheckColumn(g, sqlp, xp, agg) < -1) + return TRUE; // Unrecovable error + + break; + } // endswitch Opc + + sqlp->SetOk(TRUE); // Ok to look into outer queries for filters + + for (int i = 0; i < 2; i++) + if (!(xp = Arg(i)->SetSelect(g, sqlp, TRUE))) + return TRUE; + else if (xp != Arg(i)) { + Arg(i) = xp; + Val(i) = Arg(i)->GetValue(); + } // endif + + sqlp->SetOk(FALSE); + return FALSE; + } // end of CheckHaving + +/***********************************************************************/ +/* Used while building a table index. This function split the filter */ +/* attached to the tdbp table into the local and not local part. */ +/* The local filter is used to restrict the size of the index and the */ +/* not local part remains to be executed later. 
This has been added */ +/* recently and not only to improve the performance but chiefly to */ +/* avoid loosing rows when processing distinct joins. */ +/* Returns: */ +/* 0: the whole filter is local (both arguments are) */ +/* 1: the whole filter is not local */ +/* 2: the filter was split in local (attached to fp[0]) and */ +/* not local (attached to fp[1]). */ +/***********************************************************************/ +int FILTER::SplitFilter(PFIL *fp) + { + int i, rc[2]; + + if (Opc == OP_AND) { + for (i = 0; i < 2; i++) + rc[i] = ((PFIL)Arg(i))->SplitFilter(fp); + + // Filter first argument should never be split because of the + // algorithm used to de-linearize the filter. + assert(rc[0] != 2); + + if (rc[0] != rc[1]) { + // Splitting to be done + if (rc[1] == 2) { + // 2nd argument already split, add 1st to the proper filter + assert(fp[*rc]); + Arg(1) = fp[*rc]; + Val(1) = fp[*rc]->GetValue(); + fp[*rc] = this; + } else for (i = 0; i < 2; i++) { + // Split the filter arguments + assert(!fp[rc[i]]); + fp[rc[i]] = (PFIL)Arg(i); + } // endfor i + + *rc = 2; + } // endif rc + + } else + *rc = (CheckLocal(NULL)) ? 0 : 1; + + return *rc; + } // end of SplitFilter + +/***********************************************************************/ +/* This function is called when making a Kindex after the filter was */ +/* split in local and nolocal part in the case of many to many joins. */ +/* Indeed the whole filter must be reconstructed to take care of next */ +/* same values when doing the explosive join. In addition, the link */ +/* must be done respecting the way filters are de-linearized, no AND */ +/* filter in the first argument of an AND filter, because this is */ +/* expected to be true if SplitFilter is used again on this filter. 
*/ +/***********************************************************************/ +PFIL FILTER::LinkFilter(PGLOBAL g, PFIL fp2) + { + PFIL fp1, filp, filand = NULL; + + assert(fp2); // Test must be made by caller + + // Find where the new AND filter must be attached + for (fp1 = this; fp1->Opc == OP_AND; fp1 = (PFIL)fp1->Arg(1)) + filand = fp1; + + filp = new(g) FILTER(g, OP_AND); + filp->Arg(0) = fp1; + filp->Val(0) = fp1->GetValue(); + filp->Test[0].B_T = TYPE_INT; + filp->Test[0].Conv = FALSE; + filp->Arg(1) = fp2; + filp->Val(1) = fp2->GetValue(); + filp->Test[1].B_T = TYPE_INT; + filp->Test[1].Conv = FALSE; + filp->Value = AllocateValue(g, TYPE_INT); + + if (filand) { + // filp must be inserted here + filand->Arg(1) = filp; + filand->Val(1) = filp->GetValue(); + filp = this; + } // endif filand + + return filp; + } // end of LinkFilter + +/***********************************************************************/ +/* Checks whether filter contains reference to a previous table that */ +/* is not logically joined to the currently openned table, or whether */ +/* it is a Sub-Select filter. In any case, local is set to FALSE. */ +/* Note: This function is now applied to de-linearized filters. 
*/ +/***********************************************************************/ +bool FILTER::CheckLocal(PTDB tdbp) + { + bool local = TRUE; + + if (trace) { + if (tdbp) + htrc("CheckLocal: filp=%p R%d\n", this, tdbp->GetTdb_No()); + else + htrc("CheckLocal: filp=%p\n", this); + } // endif trace + + for (int i = 0; local && i < 2; i++) + local = Arg(i)->CheckLocal(tdbp); + + if (trace) + htrc("FCL: returning %d\n", local); + + return (local); + } // end of CheckLocal + +/***********************************************************************/ +/* This routine is used to split the filter attached to the tdbp */ +/* table into the local and not local part where "local" means that */ +/* it applies "locally" to the FILEID special column with crit = 2 */ +/* and to the SERVID and/or TABID special columns with crit = 3. */ +/* Returns: */ +/* 0: the whole filter is local (both arguments are) */ +/* 1: the whole filter is not local */ +/* 2: the filter was split in local (attached to fp[0]) and */ +/* not local (attached to fp[1]). */ +/* Note: "Locally" means that the "local" filter can be evaluated */ +/* before opening the table. This implies that the special column be */ +/* compared only with constants and that this filter not to be or'ed */ +/* with a non "local" filter. */ +/***********************************************************************/ +int FILTER::SplitFilter(PFIL *fp, PTDB tp, int crit) + { + int i, rc[2]; + + if (Opc == OP_AND) { + for (i = 0; i < 2; i++) + rc[i] = ((PFIL)Arg(i))->SplitFilter(fp, tp, crit); + + // Filter first argument should never be split because of the + // algorithm used to de-linearize the filter. 
+ assert(rc[0] != 2); + + if (rc[0] != rc[1]) { + // Splitting to be done + if (rc[1] == 2) { + // 2nd argument already split, add 1st to the proper filter + assert(fp[*rc]); + Arg(1) = fp[*rc]; + Val(1) = fp[*rc]->GetValue(); + fp[*rc] = this; + } else for (i = 0; i < 2; i++) { + // Split the filter arguments + assert(!fp[rc[i]]); + fp[rc[i]] = (PFIL)Arg(i); + } // endfor i + + *rc = 2; + } // endif rc + + } else + *rc = (CheckSpcCol(tp, crit) == 1) ? 0 : 1; + + return *rc; + } // end of SplitFilter + +/***********************************************************************/ +/* Checks whether filter contains only references to FILEID, SERVID, */ +/* or TABID with constants or pseudo constants. */ +/***********************************************************************/ +int FILTER::CheckSpcCol(PTDB tdbp, int n) + { + int n1 = Arg(0)->CheckSpcCol(tdbp, n); + int n2 = Arg(1)->CheckSpcCol(tdbp, n); + + return max(n1, n2); + } // end of CheckSpcCol +#endif // 0 + +/***********************************************************************/ +/* Reset the filter arguments to non evaluated yet. */ +/***********************************************************************/ +void FILTER::Reset(void) + { + for (int i = 0; i < 2; i++) + Arg(i)->Reset(); + + } // end of Reset + +/***********************************************************************/ +/* Init: called when reinitializing a query (Correlated subqueries) */ +/***********************************************************************/ +bool FILTER::Init(PGLOBAL g) + { + for (int i = 0; i < 2; i++) + Arg(i)->Init(g); + + return FALSE; + } // end of Init + +/***********************************************************************/ +/* Convert: does all filter setting and conversions. */ +/* (having = TRUE for Having Clauses, FALSE for Where Clauses) */ +/* Note: hierarchy of types is implied by the ConvertType */ +/* function, currently FLOAT, int, STRING and TOKEN. 
*/ +/* Returns FALSE if successful or TRUE in case of error. */ +/* Note on result type for filters: */ +/* Currently the result type is of TYPE_INT (should be TYPE_BOOL). */ +/* This avoids to introduce a new type and perhaps will permit */ +/* conversions. However the boolean operators will result in a */ +/* boolean int result, meaning that result shall be only 0 or 1 . */ +/***********************************************************************/ +bool FILTER::Convert(PGLOBAL g, bool having) + { + int i, comtype = TYPE_ERROR; + + if (trace) + htrc("converting(?) %s %p opc=%d\n", + (having) ? "having" : "filter", this, Opc); + + for (i = 0; i < 2; i++) { + switch (GetArgType(i)) { + case TYPE_COLBLK: + if (((PCOL)Arg(i))->InitValue(g)) + return TRUE; + + break; + case TYPE_ARRAY: + if ((Opc != OP_IN && !Opm) || i == 0) { + strcpy(g->Message, MSG(BAD_ARRAY_OPER)); + return TRUE; + } // endif + + if (((PARRAY)Arg(i))->Sort(g)) // Sort the array + return TRUE; // Error + + break; + case TYPE_VOID: + if (i == 1) { + Val(0) = Arg(0)->GetValue(); + goto TEST; // Filter has only one argument + } // endif i + + strcpy(g->Message, MSG(VOID_FIRST_ARG)); + return TRUE; + } // endswitch + + if (trace) + htrc("Filter(%d): Arg type=%d\n", i, GetArgType(i)); + + // Set default values + Test[i].B_T = Arg(i)->GetResultType(); + Test[i].Conv = FALSE; + + // Special case of the LIKE operator. + if (Opc == OP_LIKE) { + if (!IsTypeChar((int)Test[i].B_T)) { + sprintf(g->Message, MSG(BAD_TYPE_LIKE), i, Test[i].B_T); + return TRUE; + } // endif + + comtype = TYPE_STRING; + } else { + // Set the common type for both (eventually converted) arguments + int argtyp = Test[i].B_T; + + if (GetArgType(i) == TYPE_CONST && argtyp == TYPE_INT) { + // If possible, downcast the type to smaller types to avoid + // convertion as much as possible. 
+ int n = Arg(i)->GetValue()->GetIntValue(); + + if (n >= INT_MIN8 && n <= INT_MAX8) + argtyp = TYPE_TINY; + else if (n >= INT_MIN16 && n <= INT_MAX16) + argtyp = TYPE_SHORT; + + } else if (GetArgType(i) == TYPE_ARRAY) { + // If possible, downcast int arrays target type to TYPE_SHORT + // to take care of filters written like shortcol in (34,35,36). + if (((PARRAY)Arg(i))->CanBeShort()) + argtyp = TYPE_SHORT; + + } // endif TYPE_CONST + + comtype = ConvertType(comtype, argtyp, CNV_ANY); + } // endif Opc + + if (comtype == TYPE_ERROR) { + strcpy(g->Message, MSG(ILL_FILTER_CONV)); + return TRUE; + } // endif + + if (trace) + htrc(" comtype=%d, B_T(%d)=%d Val(%d)=%p\n", + comtype, i, Test[i].B_T, i, Val(i)); + + } // endfor i + + // Set or allocate the filter argument values and buffers + for (i = 0; i < 2; i++) { + if (trace) + htrc(" conv type %d ? i=%d B_T=%d comtype=%d\n", + GetArgType(i), i, Test[i].B_T, comtype); + + if (Test[i].B_T == comtype) { + // No conversion, set Value to argument Value + Val(i) = Arg(i)->GetValue(); +#if defined(_DEBUG) + assert (Val(i) && Val(i)->GetType() == Test[i].B_T); +#endif + } else { + // Conversion between filter arguments to be done. + // Note that the argument must be converted, not only the + // buffer and buffer type, so GetArgType() returns the new type. + switch (GetArgType(i)) { + case TYPE_CONST: + if (comtype == TYPE_DATE && Test[i].B_T == TYPE_STRING) { + // Convert according to the format of the other argument + Val(i) = AllocateValue(g, comtype, Arg(i)->GetLength()); + + if (((DTVAL*)Val(i))->SetFormat(g, Val(1-i))) + return TRUE; + + Val(i)->SetValue_psz(Arg(i)->GetValue()->GetCharValue()); + } else { + ((PCONST)Arg(i))->Convert(g, comtype); + Val(i) = Arg(i)->GetValue(); + } // endif comtype + + break; + case TYPE_ARRAY: + // Conversion PSZ or int array to int or double FLOAT. 
+ if (((PARRAY)Arg(i))->Convert(g, comtype, Val(i-1)) == TYPE_ERROR) + return TRUE; + + break; + case TYPE_FILTER: + strcpy(g->Message, MSG(UNMATCH_FIL_ARG)); + return TRUE; + default: + // Conversion from Column, Select/Func, Expr, Scalfnc... + // The argument requires conversion during Eval + // A separate Value block must be allocated. + // Note: the test on comtype is to prevent unnecessary + // domain initialization and get the correct length in + // case of Token -> numeric conversion. + Val(i) = AllocateValue(g, comtype, (comtype == TYPE_STRING) + ? Arg(i)->GetLengthEx() : Arg(i)->GetLength()); + + if (comtype == TYPE_DATE && Test[i].B_T == TYPE_STRING) + // Convert according to the format of the other argument + if (((DTVAL*)Val(i))->SetFormat(g, Val(1 - i))) + return TRUE; + + Test[i].Conv = TRUE; + break; + } // endswitch GetType + + Test[i].B_T = comtype; + } // endif comtype + + } // endfor i + + // Last check to be sure all is correct. + if (Test[0].B_T != Test[1].B_T) { + sprintf(g->Message, MSG(BAD_FILTER_CONV), Test[0].B_T, Test[1].B_T); + return TRUE; +//} else if (Test[0].B_T == TYPE_LIST && +// ((LSTVAL*)Val(0))->GetN() != ((LSTVAL*)Val(1))->GetN()) { +// sprintf(g->Message, MSG(ROW_ARGNB_ERR), +// ((LSTVAL*)Val(0))->GetN(), ((LSTVAL*)Val(1))->GetN()); +// return TRUE; + } // endif's B_T + + + TEST: // Test for possible Eval optimization + + if (trace) + htrc("Filp %p op=%d argtypes=(%d,%d)\n", + this, Opc, GetArgType(0), GetArgType(1)); + + // Check whether we have a "simple" filter and in that case + // change its class so an optimized Eval function will be used + if (!Test[0].Conv && !Test[1].Conv) { + if (Opm) switch (Opc) { + case OP_EQ: + case OP_NE: + case OP_GT: + case OP_GE: + case OP_LT: + case OP_LE: + if (GetArgType(1) != TYPE_ARRAY) + break; // On subquery, do standard processing + + // Change the FILTER class to FILTERIN + new(this) FILTERIN; + break; + default: + break; + } // endswitch Opc + + else switch (Opc) { +#if 0 + case 
OP_EQ: new(this) FILTEREQ; break; + case OP_NE: new(this) FILTERNE; break; + case OP_GT: new(this) FILTERGT; break; + case OP_GE: new(this) FILTERGE; break; + case OP_LT: new(this) FILTERLT; break; + case OP_LE: new(this) FILTERLE; break; +#endif // 0 + case OP_EQ: + case OP_NE: + case OP_GT: + case OP_GE: + case OP_LT: + case OP_LE: new(this) FILTERCMP(g); break; + case OP_AND: new(this) FILTERAND; break; + case OP_OR: new(this) FILTEROR; break; + case OP_NOT: new(this) FILTERNOT; break; + case OP_EXIST: + if (GetArgType(1) == TYPE_VOID) { + // For EXISTS it is the first argument that should be null + Arg(1) = Arg(0); + Arg(0) = pXVOID; + } // endif void + + // pass thru + case OP_IN: + // For IN operator do optimize if operand is an array + if (GetArgType(1) != TYPE_ARRAY) + break; // IN on subquery, do standard processing + + // Change the FILTER class to FILTERIN + new(this) FILTERIN; + break; + default: + break; + } // endswitch Opc + + } // endif Conv + + // The result value (should be TYPE_BOOL ???) + Value = AllocateValue(g, TYPE_INT); + return FALSE; + } // end of Convert + +/***********************************************************************/ +/* Eval: Compute filter result value. */ +/* New algorithm: evaluation is now done from the root for each group */ +/* so Eval is now a recursive process for FILTER operands. */ +/***********************************************************************/ +bool FILTER::Eval(PGLOBAL g) + { + int i; // n = 0; +//PSUBQ subp = NULL; + PARRAY ap = NULL; + PDBUSER dup = PlgGetUser(g); + + if (Opc <= OP_XX) + for (i = 0; i < 2; i++) + // Evaluate the object and eventually convert it. + if (Arg(i)->Eval(g)) + return TRUE; + else if (Test[i].Conv) + Val(i)->SetValue_pval(Arg(i)->GetValue()); + + if (trace) + htrc(" Filter: op=%d type=%d %d B_T=%d %d val=%p %p\n", + Opc, GetArgType(0), GetArgType(1), Test[0].B_T, Test[1].B_T, + Val(0), Val(1)); + + // Main switch on filtering according to operator type. 
+ switch (Opc) { + case OP_EQ: + case OP_NE: + case OP_GT: + case OP_GE: + case OP_LT: + case OP_LE: + if (!Opm) { + // Comparison boolean operators. +#if defined(_DEBUG) + if (Val(0)->GetType() != Val(1)->GetType()) + goto FilterError; +#endif + // Compare the two arguments + // New algorithm to take care of TYPE_LIST + Bt = OpBmp(g, Opc); + Value->SetValue_bool(!(Val(0)->TestValue(Val(1)) & Bt)); + break; + } // endif Opm + + // For modified operators, pass thru + case OP_IN: + case OP_EXIST: + // For IN operations, special processing is done here + switch (GetArgType(1)) { + case TYPE_ARRAY: + ap = (PARRAY)Arg(1); + break; + default: + strcpy(g->Message, MSG(IN_WITHOUT_SUB)); + goto FilterError; + } // endswitch Type + + if (trace) { + htrc(" IN filtering: ap=%p\n", ap); + + if (ap) + htrc(" Array: type=%d size=%d other_type=%d\n", + ap->GetType(), ap->GetSize(), Test[0].B_T); + + } // endif trace + + /*****************************************************************/ + /* Implementation note: The Find function is now able to do a */ + /* conversion but limited to SHORT, int, and FLOAT arrays. 
*/ + /*****************************************************************/ +// Value->SetValue_bool(ap->Find(g, Val(0))); + + if (ap) + Value->SetValue_bool(ap->FilTest(g, Val(0), Opc, Opm)); + + break; + + case OP_LIKE: +#if defined(_DEBUG) + if (!IsTypeChar((int)Test[0].B_T) || !IsTypeChar((int)Test[1].B_T)) + goto FilterError; +#endif + if (Arg(0)->Eval(g)) + return TRUE; + + Value->SetValue_bool(PlugEvalLike(g, Val(0)->GetCharValue(), + Val(1)->GetCharValue(), + Val(0)->IsCi())); + break; + + case OP_AND: +#if defined(_DEBUG) + if (Test[0].B_T != TYPE_INT || Test[1].B_T != TYPE_INT) + goto FilterError; +#endif + + if (Arg(0)->Eval(g)) + return TRUE; + + Value->SetValue(Val(0)->GetIntValue()); + + if (!Value->GetIntValue()) + return FALSE; // No need to evaluate 2nd argument + + if (Arg(1)->Eval(g)) + return TRUE; + + Value->SetValue(Val(1)->GetIntValue()); + break; + + case OP_OR: +#if defined(_DEBUG) + if (Test[0].B_T != TYPE_INT || Test[1].B_T != TYPE_INT) + goto FilterError; +#endif + + if (Arg(0)->Eval(g)) + return TRUE; + + Value->SetValue(Val(0)->GetIntValue()); + + if (Value->GetIntValue()) + return FALSE; // No need to evaluate 2nd argument + + if (Arg(1)->Eval(g)) + return TRUE; + + Value->SetValue(Val(1)->GetIntValue()); + break; + + case OP_NOT: +#if defined(_DEBUG) + if (Test[0].B_T != TYPE_INT) // Should be type bool ??? 
+ goto FilterError; +#endif + + if (Arg(0)->Eval(g)) + return TRUE; + + Value->SetValue_bool(!Val(0)->GetIntValue()); + break; + + case OP_SEP: // No more used while evaluating + default: + goto FilterError; + } // endswitch Opc + + if (trace) + htrc("Eval: filter %p Opc=%d result=%d\n", + this, Opc, Value->GetIntValue()); + + return FALSE; + + FilterError: + sprintf(g->Message, MSG(BAD_FILTER), + Opc, Test[0].B_T, Test[1].B_T, GetArgType(0), GetArgType(1)); + return TRUE; + } // end of Eval + +#if 0 +/***********************************************************************/ +/* Called by PlugCopyDB to make a copy of a (linearized) filter chain.*/ +/***********************************************************************/ +PFIL FILTER::Copy(PTABS t) + { + int i; + PFIL fil1, fil2, newfilchain = NULL, fprec = NULL; + + for (fil1 = this; fil1; fil1 = fil1->Next) { + fil2 = new(t->G) FILTER(fil1); + + if (!fprec) + newfilchain = fil2; + else + fprec->Next = fil2; + + NewPointer(t, fil1, fil2); + + for (i = 0; i < 2; i++) + if (fil1->GetArgType(i) == TYPE_COLBLK || + fil1->GetArgType(i) == TYPE_FILTER) + AddPointer(t, &fil2->Arg(i)); + + fprec = fil2; + } /* endfor fil1 */ + + return newfilchain; + } // end of Copy +#endif // 0 + +/*********************************************************************/ +/* Make file output of FILTER contents. */ +/*********************************************************************/ +void FILTER::Print(PGLOBAL g, FILE *f, uint n) + { + char m[64]; + + memset(m, ' ', n); // Make margin string + m[n] = '\0'; + + bool lin = (Next != NULL); // lin == TRUE if linearized + + for (PFIL fp = this; fp; fp = fp->Next) { + fprintf(f, "%sFILTER: at %p opc=%d lin=%d result=%d\n", + m, fp, fp->Opc, lin, + (Value) ? 
Value->GetIntValue() : 0); + + for (int i = 0; i < 2; i++) { + fprintf(f, "%s Arg(%d) type=%d value=%p B_T=%d val=%p\n", + m, i, fp->GetArgType(i), fp->Arg(i), + fp->Test[i].B_T, fp->Val(i)); + + if (lin && fp->GetArgType(i) == TYPE_FILTER) + fprintf(f, "%s Filter at %p\n", m, fp->Arg(i)); + else + fp->Arg(i)->Print(g, f, n + 2); + + } // endfor i + + } // endfor fp + + } // end of Print + +/***********************************************************************/ +/* Make string output of TABLE contents (z should be checked). */ +/***********************************************************************/ +void FILTER::Print(PGLOBAL g, char *ps, uint z) + { + #define FLEN 100 + + typedef struct _bc { + struct _bc *Next; + char Cold[FLEN+1]; + } BC, *PBC; + + char *p; + int n; + PFIL fp; + PBC bxp, bcp = NULL; + + *ps = '\0'; + + for (fp = this; fp && z > 0; fp = fp->Next) { + if (fp->Opc < OP_CNC || fp->Opc == OP_IN || fp->Opc == OP_NULL + || fp->Opc == OP_LIKE || fp->Opc == OP_EXIST) { + if (!(bxp = new BC)) { + strncat(ps, "Filter(s)", z); + return; + } /* endif */ + + bxp->Next = bcp; + bcp = bxp; + p = bcp->Cold; + n = FLEN; + fp->Arg(0)->Print(g, p, n); + n = FLEN - strlen(p); + + switch (fp->Opc) { + case OP_EQ: + strncat(bcp->Cold, "=", n); + break; + case OP_NE: + strncat(bcp->Cold, "!=", n); + break; + case OP_GT: + strncat(bcp->Cold, ">", n); + break; + case OP_GE: + strncat(bcp->Cold, ">=", n); + break; + case OP_LT: + strncat(bcp->Cold, "<", n); + break; + case OP_LE: + strncat(bcp->Cold, "<=", n); + break; + case OP_IN: + strncat(bcp->Cold, " in ", n); + break; + case OP_NULL: + strncat(bcp->Cold, " is null", n); + break; + case OP_LIKE: + strncat(bcp->Cold, " like ", n); + break; + case OP_EXIST: + strncat(bcp->Cold, " exists ", n); + break; + case OP_AND: + strncat(bcp->Cold, " and ", n); + break; + case OP_OR: + strncat(bcp->Cold, " or ", n); + break; + default: + strncat(bcp->Cold, "?", n); + } // endswitch Opc + + n = FLEN - strlen(p); + p += 
strlen(p); + fp->Arg(1)->Print(g, p, n); + } else + if (!bcp) { + strncat(ps, "???", z); + z -= 3; + } else + switch (fp->Opc) { + case OP_SEP: // Filter list separator + strncat(ps, bcp->Cold, z); + z -= strlen(bcp->Cold); + strncat(ps, ";", z--); + bxp = bcp->Next; + delete bcp; + bcp = bxp; + break; + case OP_NOT: // Filter NOT operator + for (n = MY_MIN((int)strlen(bcp->Cold), FLEN-3); n >= 0; n--) + bcp->Cold[n+2] = bcp->Cold[n]; + bcp->Cold[0] = '^'; + bcp->Cold[1] = '('; + strcat(bcp->Cold, ")"); + break; + default: + for (n = MY_MIN((int)strlen(bcp->Cold), FLEN-4); n >= 0; n--) + bcp->Cold[n+3] = bcp->Cold[n]; + bcp->Cold[0] = ')'; + switch (fp->Opc) { + case OP_AND: bcp->Cold[1] = '&'; break; + case OP_OR: bcp->Cold[1] = '|'; break; + default: bcp->Cold[1] = '?'; + } // endswitch + bcp->Cold[2] = '('; + strcat(bcp->Cold, ")"); + bxp = bcp->Next; + for (n = MY_MIN((int)strlen(bxp->Cold), FLEN-1); n >= 0; n--) + bxp->Cold[n+1] = bxp->Cold[n]; + bxp->Cold[0] = '('; + strncat(bxp->Cold, bcp->Cold, FLEN-strlen(bxp->Cold)); + delete bcp; + bcp = bxp; + } // endswitch + + } // endfor fp + + n = 0; + + if (!bcp) + strncat(ps, "Null-Filter", z); + else do { + if (z > 0) { + if (n++ > 0) { + strncat(ps, "*?*", z); + z = MY_MAX(0, (int)z-3); + } // endif + strncat(ps, bcp->Cold, z); + z -= strlen(bcp->Cold); + } // endif + + bxp = bcp->Next; + delete bcp; + bcp = bxp; + } while (bcp); // enddo + + } // end of Print + + +/* -------------------- Derived Classes Functions -------------------- */ + +/***********************************************************************/ +/* FILTERCMP constructor. */ +/***********************************************************************/ +FILTERCMP::FILTERCMP(PGLOBAL g) + { + Bt = OpBmp(g, Opc); + } // end of FILTERCMP constructor + +/***********************************************************************/ +/* Eval: Compute result value for comparison operators. 
*/ +/***********************************************************************/ +bool FILTERCMP::Eval(PGLOBAL g) + { + if (Arg(0)->Eval(g) || Arg(1)->Eval(g)) + return TRUE; + + Value->SetValue_bool(!(Val(0)->TestValue(Val(1)) & Bt)); + return FALSE; + } // end of Eval + +/***********************************************************************/ +/* Eval: Compute result value for AND filters. */ +/***********************************************************************/ +bool FILTERAND::Eval(PGLOBAL g) + { + if (Arg(0)->Eval(g)) + return TRUE; + + Value->SetValue(Val(0)->GetIntValue()); + + if (!Value->GetIntValue()) + return FALSE; // No need to evaluate 2nd argument + + if (Arg(1)->Eval(g)) + return TRUE; + + Value->SetValue(Val(1)->GetIntValue()); + return FALSE; + } // end of Eval + +/***********************************************************************/ +/* Eval: Compute result value for OR filters. */ +/***********************************************************************/ +bool FILTEROR::Eval(PGLOBAL g) + { + if (Arg(0)->Eval(g)) + return TRUE; + + Value->SetValue(Val(0)->GetIntValue()); + + if (Value->GetIntValue()) + return FALSE; // No need to evaluate 2nd argument + + if (Arg(1)->Eval(g)) + return TRUE; + + Value->SetValue(Val(1)->GetIntValue()); + return FALSE; + } // end of Eval + +/***********************************************************************/ +/* Eval: Compute result value for NOT filters. */ +/***********************************************************************/ +bool FILTERNOT::Eval(PGLOBAL g) + { + if (Arg(0)->Eval(g)) + return TRUE; + + Value->SetValue_bool(!Val(0)->GetIntValue()); + return FALSE; + } // end of Eval + +/***********************************************************************/ +/* Eval: Compute result value for IN filters. 
*/ +/***********************************************************************/ +bool FILTERIN::Eval(PGLOBAL g) + { + if (Arg(0)->Eval(g)) + return TRUE; + + Value->SetValue_bool(((PARRAY)Arg(1))->FilTest(g, Val(0), Opc, Opm)); + return FALSE; + } // end of Eval + +/***********************************************************************/ +/* FILTERTRUE does nothing and returns TRUE. */ +/***********************************************************************/ +void FILTERTRUE::Reset(void) + { + } // end of Reset + +bool FILTERTRUE::Eval(PGLOBAL) + { + return FALSE; + } // end of Eval + +/* ------------------------- Friend Functions ------------------------ */ + +#if 0 +/***********************************************************************/ +/* Prepare: prepare a filter for execution. This implies two things: */ +/* 1) de-linearize the filter to be able to evaluate it recursively. */ +/* This permit to conditionally evaluate only the first argument */ +/* of OP_OR and OP_AND filters without having to pass by an */ +/* intermediate Apply function (as this has a performance cost). */ +/* 2) do all the necessary conversion for all filter block arguments. 
*/ +/***********************************************************************/ +PFIL PrepareFilter(PGLOBAL g, PFIL fp, bool having) + { + PFIL filp = NULL; + + if (trace) + htrc("PrepareFilter: fp=%p having=%d\n", fp, having); +//if (fp) +// fp->Print(g, debug, 0); + + while (fp) { + if (fp->Opc == OP_SEP) + // If separator is not last transform it into an AND filter + if (fp->Next) { + filp = PrepareFilter(g, fp->Next, having); + fp->Arg(1) = filp; + fp->Opc = OP_AND; + fp->Next = NULL; // This will end the loop + } else + break; // Remove eventual ending separator(s) + +// if (fp->Convert(g, having)) +// longjmp(g->jumper[g->jump_level], TYPE_FILTER); + + filp = fp; + fp = fp->Next; + filp->Next = NULL; + } // endwhile + + if (trace) + htrc(" returning filp=%p\n", filp); +//if (filp) +// filp->Print(g, debug, 0); + + return filp; + } // end of PrepareFilter +#endif // 0 + +/***********************************************************************/ +/* ApplyFilter: Apply filtering for a table (where or having clause). */ +/* New algorithm: evaluate from the root a de-linearized filter so */ +/* AND/OR clauses can be optimized throughout the whole tree. 
*/ +/***********************************************************************/ +DllExport bool ApplyFilter(PGLOBAL g, PFIL filp) + { + if (!filp) + return TRUE; + + // Must be done for null tables + filp->Reset(); + +//if (tdbp && tdbp->IsNull()) +// return TRUE; + + if (filp->Eval(g)) + longjmp(g->jumper[g->jump_level], TYPE_FILTER); + + if (trace > 1) + htrc("PlugFilter filp=%p result=%d\n", + filp, filp->GetResult()); + + return filp->GetResult(); + } // end of ApplyFilter diff --git a/storage/connect/filter.h b/storage/connect/filter.h new file mode 100644 index 00000000000..78e066d9ab7 --- /dev/null +++ b/storage/connect/filter.h @@ -0,0 +1,178 @@ +/*************** Filter H Declares Source Code File (.H) ***************/ +/* Name: FILTER.H Version 1.2 */ +/* */ +/* (C) Copyright to the author Olivier BERTRAND 2010-2012 */ +/* */ +/* This file contains the FILTER and derived classes declares. */ +/***********************************************************************/ +#ifndef __FILTER__ +#define __FILTER__ + +/***********************************************************************/ +/* Include required application header files */ +/***********************************************************************/ +#include "xobject.h" + +/***********************************************************************/ +/* Utilities for WHERE condition building. */ +/***********************************************************************/ +PFIL MakeFilter(PGLOBAL g, PFIL filp, OPVAL vop, PFIL fp); +PFIL MakeFilter(PGLOBAL g, PCOL *colp, POPER pop, PPARM pfirst, bool neg); + +/***********************************************************************/ +/* Definition of class FILTER with all its method functions. */ +/* Note: Most virtual implementation functions are not in use yet */ +/* but could be in future system evolution. 
*/ +/***********************************************************************/ +class DllExport FILTER : public XOBJECT { /* Filter description block */ +//friend PFIL PrepareFilter(PGLOBAL, PFIL, bool); + friend DllExport bool ApplyFilter(PGLOBAL, PFIL); + public: + // Constructors + FILTER(PGLOBAL g, POPER pop, PPARM *tp = NULL); + FILTER(PGLOBAL g, OPVAL opc, PPARM *tp = NULL); + FILTER(PFIL fil1); + + // Implementation + virtual int GetType(void) {return TYPE_FILTER;} + virtual int GetResultType(void) {return TYPE_INT;} + virtual int GetLength(void) {return 1;} + virtual int GetLengthEx(void) {assert(FALSE); return 0;} + virtual int GetScale() {return 0;}; + PFIL GetNext(void) {return Next;} + OPVAL GetOpc(void) {return Opc;} + int GetOpm(void) {return Opm;} + int GetArgType(int i) {return Arg(i)->GetType();} + bool GetResult(void) {return Value->GetIntValue() != 0;} + PXOB &Arg(int i) {return Test[i].Arg;} + PVAL &Val(int i) {return Test[i].Value;} + bool &Conv(int i) {return Test[i].Conv;} + void SetNext(PFIL filp) {Next = filp;} + + // Methods + virtual void Reset(void); + virtual bool Compare(PXOB) {return FALSE;} // Not used yet + virtual bool Init(PGLOBAL); + virtual bool Eval(PGLOBAL); + virtual bool SetFormat(PGLOBAL, FORMAT&) {return TRUE;} // NUY +//virtual int CheckColumn(PGLOBAL g, PSQL sqlp, PXOB &xp, int &ag); +//virtual int RefNum(PSQL); +//virtual PXOB SetSelect(PGLOBAL, PSQL, bool) {return NULL;} // NUY +//virtual PXOB CheckSubQuery(PGLOBAL, PSQL); +//virtual bool CheckLocal(PTDB); +//virtual int CheckSpcCol(PTDB tdbp, int n); + virtual void Print(PGLOBAL g, FILE *f, uint n); + virtual void Print(PGLOBAL g, char *ps, uint z); +// PFIL Linearize(bool nosep); +// PFIL Link(PGLOBAL g, PFIL fil2); +// PFIL RemoveLastSep(void); +// PFIL SortJoin(PGLOBAL g); +// bool FindJoinFilter(POPJOIN opj, PFIL fprec, bool teq, +// bool tek, bool tk2, bool tc2, bool tix, bool thx); +// bool CheckHaving(PGLOBAL g, PSQL sqlp); + bool Convert(PGLOBAL g, bool 
having); +// int SplitFilter(PFIL *fp); +// int SplitFilter(PFIL *fp, PTDB tp, int n); +// PFIL LinkFilter(PGLOBAL g, PFIL fp2); +// PFIL Copy(PTABS t); + + protected: + FILTER(void) {} // Standard constructor not to be used + void Constr(PGLOBAL g, OPVAL opc, int opm, PPARM *tp); + + // Members + PFIL Next; // Used for linearization + OPVAL Opc; // Comparison operator + int Opm; // Modificator + BYTE Bt; // Operator bitmap + struct { + int B_T; // Buffer type + PXOB Arg; // Points to argument + PVAL Value; // Points to argument value + bool Conv; // TRUE if argument must be converted + } Test[2]; + }; // end of class FILTER + +/***********************************************************************/ +/* Derived class FILTERX: used to replace a filter by a derived class */ +/* using an Eval method optimizing the filtering evaluation. */ +/* Note: this works only if the members of the derived class are the */ +/* same than the ones of the original class (NO added members). */ +/***********************************************************************/ +class FILTERX : public FILTER { + public: + // Methods + virtual bool Eval(PGLOBAL) = 0; // just to prevent direct FILTERX use + + // Fake operator new used to change a filter into a derived filter + void * operator new(size_t size, PFIL filp) {return filp;} +#if defined(WIN32) + // Avoid warning C4291 by defining a matching dummy delete operator + void operator delete(void *, PFIL) {} +#else + void operator delete(void *) {} +#endif + }; // end of class FILTERX + +/***********************************************************************/ +/* Derived class FILTEREQ: OP_EQ, no conversion and Xobject args. 
*/ +/***********************************************************************/ +class FILTERCMP : public FILTERX { + public: + // Constructor + FILTERCMP(PGLOBAL g); + + // Methods + virtual bool Eval(PGLOBAL); + }; // end of class FILTEREQ + +/***********************************************************************/ +/* Derived class FILTERAND: OP_AND, no conversion and Xobject args. */ +/***********************************************************************/ +class FILTERAND : public FILTERX { + public: + // Methods + virtual bool Eval(PGLOBAL); + }; // end of class FILTERAND + +/***********************************************************************/ +/* Derived class FILTEROR: OP_OR, no conversion and Xobject args. */ +/***********************************************************************/ +class FILTEROR : public FILTERX { + public: + // Methods + virtual bool Eval(PGLOBAL); + }; // end of class FILTEROR + +/***********************************************************************/ +/* Derived class FILTERNOT: OP_NOT, no conversion and Xobject args. */ +/***********************************************************************/ +class FILTERNOT : public FILTERX { + public: + // Methods + virtual bool Eval(PGLOBAL); + }; // end of class FILTERNOT + +/***********************************************************************/ +/* Derived class FILTERIN: OP_IN, no conversion and Array 2nd arg. */ +/***********************************************************************/ +class FILTERIN : public FILTERX { + public: + // Methods + virtual bool Eval(PGLOBAL); + }; // end of class FILTERIN + +/***********************************************************************/ +/* Derived class FILTERTRUE: Always returns TRUE. 
*/ +/***********************************************************************/ +class FILTERTRUE : public FILTERX { + public: + // Constructor + FILTERTRUE(PVAL valp) {Value = valp; Value->SetValue_bool(TRUE);} + + // Methods + virtual void Reset(void); + virtual bool Eval(PGLOBAL); + }; // end of class FILTERTRUE + +#endif // __FILTER__ diff --git a/storage/connect/global.h b/storage/connect/global.h index c0746e37db7..d35cef2de6f 100644 --- a/storage/connect/global.h +++ b/storage/connect/global.h @@ -1,6 +1,6 @@ /***********************************************************************/ /* GLOBAL.H: Declaration file used by all CONNECT implementations. */ -/* (C) Copyright Olivier Bertrand 1993-2012 */ +/* (C) Copyright Olivier Bertrand 1993-2014 */ /***********************************************************************/ /***********************************************************************/ @@ -23,12 +23,12 @@ #define XML_SUPPORT 1 #endif -#if defined(XMSG) +#if defined(XMSG) // Definition used to read messages from message file. #include "msgid.h" #define MSG(I) PlugReadMessage(NULL, MSG_##I, #I) #define STEP(I) PlugReadMessage(g, MSG_##I, #I) -#elif defined(NEWMSG) +#elif defined(NEWMSG) // Definition used to get messages from resource. 
#include "msgid.h" #define MSG(I) PlugGetMessage(NULL, MSG_##I) @@ -85,6 +85,7 @@ #define TYPE_INT 7 #define TYPE_DECIM 9 #define TYPE_BIN 10 +#define TYPE_PCHAR 11 #if defined(OS32) #define SYS_STAMP "OS32" @@ -223,6 +224,7 @@ typedef struct _global { /* Global structure */ int Createas; /* To pass info to created table */ void *Xchk; /* indexes in create/alter */ short Alchecked; /* Checked for ALTER */ + short Mrr; /* True when doing mrr */ short Trace; int jump_level; jmp_buf jumper[MAX_JUMP + 2]; diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index ff15b27ca50..6f9334bb604 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -28,7 +28,7 @@ ha_connect will let you create/open/delete tables, the created table can be done specifying an already existing file, the drop table command will just suppress the table definition but not the eventual data file. - Indexes are not supported for all table types but data can be inserted, + Indexes are not supported for all table types but data can be inserted, updated or deleted. 
You can enable the CONNECT storage engine in your build by doing the @@ -108,7 +108,6 @@ #define MYSQL_SERVER 1 #define DONT_DEFINE_VOID -//#include "sql_partition.h" #include "sql_class.h" #include "create_options.h" #include "mysql_com.h" @@ -116,6 +115,10 @@ #include "sql_parse.h" #include "sql_base.h" #include <sys/stat.h> +#if defined(NEW_WAY) +#include "sql_table.h" +#endif // NEW_WAY +#include "sql_partition.h" #undef OFFSET #define NOPARSE @@ -167,10 +170,17 @@ #define SZWMIN 4194304 // Minimum work area size 4M extern "C" { - char version[]= "Version 1.02.0002 March 16, 2014"; + char version[]= "Version 1.03.0002 July 17, 2014"; + char compver[]= "Version 1.03.0002 " __DATE__ " " __TIME__; + +#if defined(WIN32) + char slash= '\\'; +#else // !WIN32 + char slash= '/'; +#endif // !WIN32 #if defined(XMSG) - char msglang[]; // Default message language + char msglang[]; // Default message language #endif int trace= 0; // The general trace value int xconv= 0; // The type conversion option @@ -185,7 +195,7 @@ extern "C" { ulong ha_connect::num= 0; //int DTVAL::Shift= 0; -/* CONNECT system variables */ +/* CONNECT system variables */ static int xtrace= 0; static int conv_size= SZCONV; static uint work_size= SZWORK; @@ -199,13 +209,16 @@ static my_bool indx_map= 0; /***********************************************************************/ PQRYRES OEMColumns(PGLOBAL g, PTOS topt, char *tab, char *db, bool info); void PushWarning(PGLOBAL g, THD *thd, int level); +bool CheckSelf(PGLOBAL g, TABLE_SHARE *s, const char *host, + const char *db, char *tab, const char *src, int port); + static PCONNECT GetUser(THD *thd, PCONNECT xp); static PGLOBAL GetPlug(THD *thd, PCONNECT& lxp); static handler *connect_create_handler(handlerton *hton, - TABLE_SHARE *table, - MEM_ROOT *mem_root); + TABLE_SHARE *table, + MEM_ROOT *mem_root); static int connect_assisted_discovery(handlerton *hton, THD* thd, TABLE_SHARE *table_s, @@ -304,13 +317,29 @@ ha_create_table_option 
connect_table_option_list[]= ha_create_table_option connect_field_option_list[]= { HA_FOPTION_NUMBER("FLAG", offset, (ulonglong) -1, 0, INT_MAX32, 1), + HA_FOPTION_NUMBER("MAX_DIST", freq, 0, 0, INT_MAX32, 1), // BLK_INDX +//HA_FOPTION_NUMBER("DISTRIB", opt, 0, 0, 2, 1), // used for BLK_INDX HA_FOPTION_NUMBER("FIELD_LENGTH", fldlen, 0, 0, INT_MAX32, 1), HA_FOPTION_STRING("DATE_FORMAT", dateformat), HA_FOPTION_STRING("FIELD_FORMAT", fieldformat), HA_FOPTION_STRING("SPECIAL", special), + HA_FOPTION_ENUM("DISTRIB", opt, "scattered,clustered,sorted", 0), HA_FOPTION_END }; +/* + CREATE TABLE option list (index options) + + These can be specified in the CREATE TABLE per index: + CREATE TABLE ( field ..., .., INDEX .... *here*, ... ) +*/ +ha_create_table_option connect_index_option_list[]= +{ + HA_IOPTION_BOOL("DYNAM", dynamic, 0), + HA_IOPTION_BOOL("MAPPED", mapped, 0), + HA_IOPTION_END +}; + /***********************************************************************/ /* Push G->Message as a MySQL warning. 
*/ /***********************************************************************/ @@ -390,7 +419,7 @@ DllExport LPCSTR PlugSetPath(LPSTR to, LPCSTR name, LPCSTR dir) */ static const char *ha_connect_exts[]= { ".dos", ".fix", ".csv", ".bin", ".fmt", ".dbf", ".xml", ".ini", ".vec", - ".dnx", ".fnx", ".bnx", ".vnx", ".dbx", + ".dnx", ".fnx", ".bnx", ".vnx", ".dbx", ".dop", ".fop", ".bop", ".vop", NULL}; /** @@ -401,7 +430,7 @@ static int connect_init_func(void *p) { DBUG_ENTER("connect_init_func"); - sql_print_information("CONNECT: %s", version); + sql_print_information("CONNECT: %s", compver); // xtrace is now a system variable trace= xtrace; @@ -415,9 +444,11 @@ static int connect_init_func(void *p) connect_hton= (handlerton *)p; connect_hton->state= SHOW_OPTION_YES; connect_hton->create= connect_create_handler; - connect_hton->flags= HTON_TEMPORARY_NOT_SUPPORTED | HTON_NO_PARTITION; +//connect_hton->flags= HTON_TEMPORARY_NOT_SUPPORTED | HTON_NO_PARTITION; + connect_hton->flags= HTON_TEMPORARY_NOT_SUPPORTED; connect_hton->table_options= connect_table_option_list; connect_hton->field_options= connect_field_option_list; + connect_hton->index_options= connect_index_option_list; connect_hton->tablefile_extensions= ha_connect_exts; connect_hton->discover_table_structure= connect_assisted_discovery; @@ -426,7 +457,7 @@ static int connect_init_func(void *p) DTVAL::SetTimeShift(); // Initialize time zone shift once for all DBUG_RETURN(0); -} +} // end of connect_init_func /** @@ -456,13 +487,13 @@ static int connect_done_func(void *p) } // endfor pc DBUG_RETURN(error); -} +} // end of connect_done_func /** @brief Example of simple lock controls. The "share" it creates is a - structure we will pass to each example handler. Do you have to have + structure we will pass to each CONNECT handler. Do you have to have one of these? Well, you have pieces that are used for locking, and they are needed to function. 
*/ @@ -470,20 +501,22 @@ static int connect_done_func(void *p) CONNECT_SHARE *ha_connect::get_share() { CONNECT_SHARE *tmp_share; + lock_shared_ha_data(); - if (!(tmp_share= static_cast<CONNECT_SHARE*>(get_ha_share_ptr()))) - { + + if (!(tmp_share= static_cast<CONNECT_SHARE*>(get_ha_share_ptr()))) { tmp_share= new CONNECT_SHARE; if (!tmp_share) goto err; mysql_mutex_init(con_key_mutex_CONNECT_SHARE_mutex, &tmp_share->mutex, MY_MUTEX_INIT_FAST); set_ha_share_ptr(static_cast<Handler_share*>(tmp_share)); - } -err: + } // endif tmp_share + + err: unlock_shared_ha_data(); return tmp_share; -} +} // end of get_share static handler* connect_create_handler(handlerton *hton, @@ -514,15 +547,19 @@ ha_connect::ha_connect(handlerton *hton, TABLE_SHARE *table_arg) sdvalout= NULL; xmod= MODE_ANY; istable= false; -//*tname= '\0'; + *partname= 0; bzero((char*) &xinfo, sizeof(XINFO)); valid_info= false; valid_query_id= 0; creat_query_id= (table && table->in_use) ? table->in_use->query_id : 0; stop= false; alter= false; + mrr= false; + nox= false; + abort= false; indexing= -1; locked= 0; + part_id= NULL; data_file_name= NULL; index_file_name= NULL; enable_activate_all_index= 0; @@ -640,7 +677,13 @@ TABTYPE ha_connect::GetRealType(PTOS pos) const char *ha_connect::index_type(uint inx) { switch (GetIndexType(GetRealType())) { - case 1: return "XPLUG"; + case 1: + if (table_share) + return (GetIndexOption(&table_share->key_info[inx], "Dynamic")) + ? 
"KINDEX" : "XINDEX"; + else + return "XINDEX"; + case 2: return "REMOTE"; } // endswitch @@ -696,12 +739,15 @@ ulonglong ha_connect::table_flags() const if (pos) { TABTYPE type= hp->GetRealType(pos); - + + if (IsFileType(type)) + flags|= HA_FILE_BASED; + if (IsExactType(type)) flags|= (HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT); // No data change on ALTER for outward tables - if (!IsFileType(type) || hp->FileExists(pos->filename)) + if (!IsFileType(type) || hp->FileExists(pos->filename, true)) flags|= HA_NO_COPY_ON_ALTER; } // endif pos @@ -710,7 +756,7 @@ ulonglong ha_connect::table_flags() const } // end of table_flags /****************************************************************************/ -/* Return the value of an option specified in the option list. */ +/* Return the value of an option specified in an option list. */ /****************************************************************************/ char *GetListOption(PGLOBAL g, const char *opname, const char *oplist, const char *def) @@ -771,6 +817,22 @@ PTOS ha_connect::GetTableOptionStruct(TABLE_SHARE *s) } // end of GetTableOptionStruct /****************************************************************************/ +/* Return the string eventually formatted with partition name. */ +/****************************************************************************/ +char *ha_connect::GetRealString(const char *s) +{ + char *sv; + + if (IsPartitioned() && s) { + sv= (char*)PlugSubAlloc(xp->g, NULL, strlen(s) + strlen(partname)); + sprintf(sv, s, partname); + } else + sv= (char*)s; + + return sv; +} // end of GetRealString + +/****************************************************************************/ /* Return the value of a string option or NULL if not specified. 
*/ /****************************************************************************/ char *ha_connect::GetStringOption(char *opname, char *sdef) @@ -778,16 +840,26 @@ char *ha_connect::GetStringOption(char *opname, char *sdef) char *opval= NULL; PTOS options= GetTableOptionStruct(); - if (!options) + if (!stricmp(opname, "Connect")) { + LEX_STRING cnc= (tshp) ? tshp->connect_string : table->s->connect_string; + + if (cnc.length) + opval= GetRealString(cnc.str); + + } else if (!stricmp(opname, "Query_String")) + opval= thd_query_string(table->in_use)->str; + else if (!stricmp(opname, "Partname")) + opval= partname; + else if (!options) ; else if (!stricmp(opname, "Type")) opval= (char*)options->type; else if (!stricmp(opname, "Filename")) - opval= (char*)options->filename; + opval= GetRealString(options->filename); else if (!stricmp(opname, "Optname")) opval= (char*)options->optname; else if (!stricmp(opname, "Tabname")) - opval= (char*)options->tabname; + opval= GetRealString(options->tabname); else if (!stricmp(opname, "Tablist")) opval= (char*)options->tablist; else if (!stricmp(opname, "Database") || @@ -795,8 +867,6 @@ char *ha_connect::GetStringOption(char *opname, char *sdef) opval= (char*)options->dbname; else if (!stricmp(opname, "Separator")) opval= (char*)options->separator; - else if (!stricmp(opname, "Connect")) - opval= (tshp) ? 
tshp->connect_string.str : table->s->connect_string.str; else if (!stricmp(opname, "Qchar")) opval= (char*)options->qchar; else if (!stricmp(opname, "Module")) @@ -811,8 +881,6 @@ char *ha_connect::GetStringOption(char *opname, char *sdef) opval= (char*)options->colist; else if (!stricmp(opname, "Data_charset")) opval= (char*)options->data_charset; - else if (!stricmp(opname, "Query_String")) - opval= thd_query_string(table->in_use)->str; if (!opval && options && options->oplist) opval= GetListOption(xp->g, opname, options->oplist); @@ -823,7 +891,7 @@ char *ha_connect::GetStringOption(char *opname, char *sdef) if (!stricmp(opname, "Dbname") || !stricmp(opname, "Database")) opval= (char*)GetDBName(NULL); // Current database else if (!stricmp(opname, "Type")) // Default type - opval= (!options) ? NULL : + opval= (!options) ? NULL : (options->srcdef) ? (char*)"MYSQL" : (options->tabname) ? (char*)"PROXY" : (char*)"DOS"; else if (!stricmp(opname, "User")) // Connected user @@ -897,21 +965,21 @@ bool ha_connect::SetBooleanOption(char *opname, bool b) /****************************************************************************/ int ha_connect::GetIntegerOption(char *opname) { - ulonglong opval= NO_IVAL; - char *pv; - PTOS options= GetTableOptionStruct(); + ulonglong opval= NO_IVAL; + char *pv; + PTOS options= GetTableOptionStruct(); + TABLE_SHARE *tsp= (tshp) ? 
tshp : table_share; - if (!options) + if (!stricmp(opname, "Avglen")) + opval= (ulonglong)tsp->avg_row_length; + else if (!stricmp(opname, "Estimate")) + opval= (ulonglong)tsp->max_rows; + else if (!options) ; else if (!stricmp(opname, "Lrecl")) opval= options->lrecl; else if (!stricmp(opname, "Elements")) opval= options->elements; - else if (!stricmp(opname, "Estimate")) -// opval= options->estimate; - opval= (int)table->s->max_rows; - else if (!stricmp(opname, "Avglen")) - opval= (int)table->s->avg_row_length; else if (!stricmp(opname, "Multiple")) opval= options->multiple; else if (!stricmp(opname, "Header")) @@ -925,7 +993,7 @@ int ha_connect::GetIntegerOption(char *opname) if (opval == (ulonglong)NO_IVAL && options && options->oplist) if ((pv= GetListOption(xp->g, opname, options->oplist))) - opval= CharToNumber(pv, strlen(pv), ULONGLONG_MAX, true); + opval= CharToNumber(pv, strlen(pv), ULONGLONG_MAX, true); return (int)opval; } // end of GetIntegerOption @@ -1012,6 +1080,7 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf) } // endif special pcf->Scale= 0; + pcf->Opt= (fop) ? (int)fop->opt : 0; if ((pcf->Length= fp->field_length) < 0) pcf->Length= 256; // BLOB? 
@@ -1020,10 +1089,12 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf) if (fop) { pcf->Offset= (int)fop->offset; + pcf->Freq= (int)fop->freq; pcf->Datefmt= (char*)fop->dateformat; pcf->Fieldfmt= (char*)fop->fieldformat; } else { pcf->Offset= -1; + pcf->Freq= 0; pcf->Datefmt= NULL; pcf->Fieldfmt= NULL; } // endif fop @@ -1050,6 +1121,7 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf) // Find if collation name ends by _ci if (!strcmp(cp + strlen(cp) - 3, "_ci")) { pcf->Scale= 1; // Case insensitive + pcf->Opt= 0; // Prevent index opt until it is safe } // endif ci break; @@ -1065,7 +1137,7 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf) // Field_length is only used for DATE columns if (fop && fop->fldlen) pcf->Length= (int)fop->fldlen; - else { + else { int len; if (pcf->Datefmt) { @@ -1119,6 +1191,50 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf) } // end of GetColumnOption /****************************************************************************/ +/* Return an index option structure. */ +/****************************************************************************/ +PXOS ha_connect::GetIndexOptionStruct(KEY *kp) +{ + return kp->option_struct; +} // end of GetIndexOptionStruct + +/****************************************************************************/ +/* Return a Boolean index option or false if not specified. 
*/ +/****************************************************************************/ +bool ha_connect::GetIndexOption(KEY *kp, char *opname) +{ + bool opval= false; + PXOS options= GetIndexOptionStruct(kp); + + if (options) { + if (!stricmp(opname, "Dynamic")) + opval= options->dynamic; + else if (!stricmp(opname, "Mapped")) + opval= options->mapped; + + } else if (kp->comment.str != NULL) { + char *pv, *oplist= kp->comment.str; + + if ((pv= GetListOption(xp->g, opname, oplist))) + opval= (!*pv || *pv == 'y' || *pv == 'Y' || atoi(pv) != 0); + + } // endif comment + + return opval; +} // end of GetIndexOption + +/****************************************************************************/ +/* Returns the index description structure used to make the index. */ +/****************************************************************************/ +bool ha_connect::IsUnique(uint n) +{ + TABLE_SHARE *s= (table) ? table->s : NULL; + KEY kp= s->key_info[n]; + + return (kp.flags & 1) != 0; +} // end of IsUnique + +/****************************************************************************/ /* Returns the index description structure used to make the index. 
*/ /****************************************************************************/ PIXDEF ha_connect::GetIndexInfo(TABLE_SHARE *s) @@ -1162,7 +1278,7 @@ PIXDEF ha_connect::GetIndexInfo(TABLE_SHARE *s) #if 0 // NIY // Index on auto increment column can be an XXROW index - if (kp.key_part[k].field->flags & AUTO_INCREMENT_FLAG && + if (kp.key_part[k].field->flags & AUTO_INCREMENT_FLAG && kp.uder_defined_key_parts == 1) { char *type= GetStringOption("Type", "DOS"); TABTYPE typ= GetTypeID(type); @@ -1180,6 +1296,8 @@ PIXDEF ha_connect::GetIndexInfo(TABLE_SHARE *s) } // endfor k xdp->SetNParts(kp.user_defined_key_parts); + xdp->Dynamic= GetIndexOption(&kp, "Dynamic"); + xdp->Mapped= GetIndexOption(&kp, "Mapped"); if (pxd) pxd->SetNext(xdp); @@ -1192,6 +1310,17 @@ PIXDEF ha_connect::GetIndexInfo(TABLE_SHARE *s) return toidx; } // end of GetIndexInfo +bool ha_connect::IsPartitioned(void) +{ + if (tshp) + return tshp->partition_info_str_len > 0; + else if (table && table->part_info) + return true; + else + return false; + +} // end of IsPartitioned + const char *ha_connect::GetDBName(const char* name) { return (name) ? name : table->s->db.str; @@ -1202,6 +1331,11 @@ const char *ha_connect::GetTableName(void) return (tshp) ? tshp->table_name.str : table_share->table_name.str; } // end of GetTableName +char *ha_connect::GetPartName(void) +{ + return (IsPartitioned()) ? partname : (char*)GetTableName(); +} // end of GetTableName + #if 0 /****************************************************************************/ /* Returns the column real or special name length of a field. 
*/ @@ -1270,7 +1404,7 @@ PTDB ha_connect::GetTDB(PGLOBAL g) tp->SetMode(xmod); } else if ((tp= CntGetTDB(g, table_name, xmod, this))) { valid_query_id= xp->last_query_id; - tp->SetMode(xmod); +// tp->SetMode(xmod); } else htrc("GetTDB: %s\n", g->Message); @@ -1349,6 +1483,17 @@ int ha_connect::OpenTable(PGLOBAL g, bool del) for (field= table->field; fp= *field; field++) if (bitmap_is_set(ump, fp->field_index)) { strcpy(p, (char*)fp->field_name); + + if (part_id && bitmap_is_set(part_id, fp->field_index)) { + // Trying to update a column used for partitioning + // This cannot be currently done because it may require + // a row to be moved in another partition. + sprintf(g->Message, + "Cannot update column %s because it is used for partitioning", + p); + return HA_ERR_INTERNAL_ERROR; + } // endif part_id + p+= (strlen(p) + 1); } // endif used field @@ -1381,6 +1526,50 @@ int ha_connect::OpenTable(PGLOBAL g, bool del) /****************************************************************************/ +/* CheckColumnList: check that all bitmap columns do exist. 
*/ +/****************************************************************************/ +bool ha_connect::CheckColumnList(PGLOBAL g) +{ + // Check the list of used fields (columns) + int rc; + bool brc= false; + PCOL colp; + Field* *field; + Field* fp; + MY_BITMAP *map= table->read_set; + + // Save stack and allocation environment and prepare error return + if (g->jump_level == MAX_JUMP) { + strcpy(g->Message, MSG(TOO_MANY_JUMPS)); + return true; + } // endif jump_level + + if ((rc= setjmp(g->jumper[++g->jump_level])) == 0) { + for (field= table->field; fp= *field; field++) + if (bitmap_is_set(map, fp->field_index)) { + if (!(colp= tdbp->ColDB(g, (PSZ)fp->field_name, 0))) { + sprintf(g->Message, "Column %s not found in %s", + fp->field_name, tdbp->GetName()); + brc= true; + goto fin; + } // endif colp + + if ((brc= colp->InitValue(g))) + goto fin; + + colp->AddColUse(U_P); // For PLG tables + } // endif + + } else + brc= true; + + fin: + g->jump_level--; + return brc; +} // end of CheckColumnList + + +/****************************************************************************/ /* IsOpened: returns true if the table is already opened. 
*/ /****************************************************************************/ bool ha_connect::IsOpened(void) @@ -1395,12 +1584,14 @@ bool ha_connect::IsOpened(void) /****************************************************************************/ int ha_connect::CloseTable(PGLOBAL g) { - int rc= CntCloseTable(g, tdbp); + int rc= CntCloseTable(g, tdbp, nox, abort); tdbp= NULL; sdvalin=NULL; sdvalout=NULL; valid_info= false; indexing= -1; + nox= false; + abort= false; return rc; } // end of CloseTable @@ -1447,10 +1638,14 @@ int ha_connect::MakeRecord(char *buf) if (bitmap_is_set(map, fp->field_index) || alter) { // This is a used field, fill the buffer with value for (colp= tdbp->GetColumns(); colp; colp= colp->GetNext()) - if (!stricmp(colp->GetName(), (char*)fp->field_name)) + if ((!mrr || colp->GetKcol()) && + !stricmp(colp->GetName(), (char*)fp->field_name)) break; if (!colp) { + if (mrr) + continue; + htrc("Column %s not found\n", fp->field_name); dbug_tmp_restore_column_map(table->write_set, org_bitmap); DBUG_RETURN(HA_ERR_WRONG_IN_RECORD); @@ -1465,7 +1660,7 @@ int ha_connect::MakeRecord(char *buf) case TYPE_DATE: if (!sdvalout) sdvalout= AllocateValue(xp->g, TYPE_STRING, 20); - + switch (fp->type()) { case MYSQL_TYPE_DATE: fmt= "%Y-%m-%d"; @@ -1480,7 +1675,7 @@ int ha_connect::MakeRecord(char *buf) fmt= "%Y-%m-%d %H:%M:%S"; break; } // endswitch type - + // Get date in the format required by MySQL fields value->FormatValue(sdvalout, fmt); p= sdvalout->GetCharValue(); @@ -1524,6 +1719,11 @@ int ha_connect::MakeRecord(char *buf) } // endfor field + // This is sometimes required for partition tables because the buf + // can be different from the table->record[0] buffer + if (buf != (char*)table->record[0]) + memcpy(buf, table->record[0], table->s->stored_rec_length); + // This is copied from ha_tina and is necessary to avoid asserts dbug_tmp_restore_column_map(table->write_set, org_bitmap); DBUG_RETURN(rc); @@ -1674,7 +1874,11 @@ bool 
ha_connect::MakeKeyWhere(PGLOBAL g, char *qry, OPVAL op, char *q, KEY_PART_INFO *kpart; if (active_index == MAX_KEY) - return 0; + return false; + else if (!key) { + strcpy(g->Message, "MakeKeyWhere: No key"); + return true; + } // endif key strcat(qry, " WHERE ("); kfp= &table->key_info[active_index]; @@ -1815,6 +2019,196 @@ const char *ha_connect::GetValStr(OPVAL vop, bool neg) return val; } // end of GetValStr +#if 0 +/***********************************************************************/ +/* Check the WHERE condition and return a CONNECT filter. */ +/***********************************************************************/ +PFIL ha_connect::CheckFilter(PGLOBAL g) +{ + return CondFilter(g, (Item *)pushed_cond); +} // end of CheckFilter +#endif // 0 + +/***********************************************************************/ +/* Check the WHERE condition and return a CONNECT filter. */ +/***********************************************************************/ +PFIL ha_connect::CondFilter(PGLOBAL g, Item *cond) +{ + unsigned int i; + bool ismul= false; + OPVAL vop= OP_XX; + PFIL filp= NULL; + + if (!cond) + return NULL; + + if (xtrace) + htrc("Cond type=%d\n", cond->type()); + + if (cond->type() == COND::COND_ITEM) { + PFIL fp; + Item_cond *cond_item= (Item_cond *)cond; + + if (xtrace) + htrc("Cond: Ftype=%d name=%s\n", cond_item->functype(), + cond_item->func_name()); + + switch (cond_item->functype()) { + case Item_func::COND_AND_FUNC: vop= OP_AND; break; + case Item_func::COND_OR_FUNC: vop= OP_OR; break; + default: return NULL; + } // endswitch functype + + List<Item>* arglist= cond_item->argument_list(); + List_iterator<Item> li(*arglist); + Item *subitem; + + for (i= 0; i < arglist->elements; i++) + if ((subitem= li++)) { + if (!(fp= CondFilter(g, subitem))) { + if (vop == OP_OR) + return NULL; + } else + filp= (filp) ? 
MakeFilter(g, filp, vop, fp) : fp; + + } else + return NULL; + + } else if (cond->type() == COND::FUNC_ITEM) { + unsigned int i; + bool iscol, neg= FALSE; + PCOL colp[2]= {NULL,NULL}; + PPARM pfirst= NULL, pprec= NULL; + POPER pop; + Item_func *condf= (Item_func *)cond; + Item* *args= condf->arguments(); + + if (xtrace) + htrc("Func type=%d argnum=%d\n", condf->functype(), + condf->argument_count()); + + switch (condf->functype()) { + case Item_func::EQUAL_FUNC: + case Item_func::EQ_FUNC: vop= OP_EQ; break; + case Item_func::NE_FUNC: vop= OP_NE; break; + case Item_func::LT_FUNC: vop= OP_LT; break; + case Item_func::LE_FUNC: vop= OP_LE; break; + case Item_func::GE_FUNC: vop= OP_GE; break; + case Item_func::GT_FUNC: vop= OP_GT; break; + case Item_func::IN_FUNC: vop= OP_IN; + case Item_func::BETWEEN: + ismul= true; + neg= ((Item_func_opt_neg *)condf)->negated; + break; + default: return NULL; + } // endswitch functype + + pop= (POPER)PlugSubAlloc(g, NULL, sizeof(OPER)); + pop->Name= NULL; + pop->Val=vop; + pop->Mod= 0; + + if (condf->argument_count() < 2) + return NULL; + + for (i= 0; i < condf->argument_count(); i++) { + if (xtrace) + htrc("Argtype(%d)=%d\n", i, args[i]->type()); + + if (i >= 2 && !ismul) { + if (xtrace) + htrc("Unexpected arg for vop=%d\n", vop); + + continue; + } // endif i + + if ((iscol= args[i]->type() == COND::FIELD_ITEM)) { + Item_field *pField= (Item_field *)args[i]; + + // IN and BETWEEN clauses should be col VOP list + if (i && ismul) + return NULL; + + if (pField->field->table != table || + !(colp[i]= tdbp->ColDB(g, (PSZ)pField->field->field_name, 0))) + return NULL; // Column does not belong to this table + + if (xtrace) { + htrc("Field index=%d\n", pField->field->field_index); + htrc("Field name=%s\n", pField->field->field_name); + } // endif xtrace + + } else { + char buff[256]; + String *res, tmp(buff, sizeof(buff), &my_charset_bin); + Item_basic_constant *pval= (Item_basic_constant *)args[i]; + PPARM pp= (PPARM)PlugSubAlloc(g, NULL, 
sizeof(PARM)); + + // IN and BETWEEN clauses should be col VOP list + if (!i && (ismul)) + return NULL; + + if ((res= pval->val_str(&tmp)) == NULL) + return NULL; // To be clarified + + switch (args[i]->real_type()) { + case COND::STRING_ITEM: + pp->Type= TYPE_STRING; + pp->Value= PlugSubAlloc(g, NULL, res->length() + 1); + strncpy((char*)pp->Value, res->ptr(), res->length() + 1); + break; + case COND::INT_ITEM: + pp->Type= TYPE_INT; + pp->Value= PlugSubAlloc(g, NULL, sizeof(int)); + *((int*)pp->Value)= (int)pval->val_int(); + break; + case COND::DATE_ITEM: + pp->Type= TYPE_DATE; + pp->Value= PlugSubAlloc(g, NULL, sizeof(int)); + *((int*)pp->Value)= (int)pval->val_int_from_date(); + break; + case COND::REAL_ITEM: + pp->Type= TYPE_DOUBLE; + pp->Value= PlugSubAlloc(g, NULL, sizeof(double)); + *((double*)pp->Value)= pval->val_real(); + break; + case COND::DECIMAL_ITEM: + pp->Type= TYPE_DOUBLE; + pp->Value= PlugSubAlloc(g, NULL, sizeof(double)); + *((double*)pp->Value)= pval->val_real_from_decimal(); + break; + case COND::CACHE_ITEM: // Possible ??? + case COND::NULL_ITEM: // TODO: handle this + default: + return NULL; + } // endswitch type + + if (xtrace) + htrc("Value=%.*s\n", res->length(), res->ptr()); + + // Append the value to the argument list + if (pprec) + pprec->Next= pp; + else + pfirst= pp; + + pp->Domain= i; + pp->Next= NULL; + pprec= pp; + } // endif type + + } // endfor i + + filp= MakeFilter(g, colp, pop, pfirst, neg); + } else { + if (xtrace) + htrc("Unsupported condition\n"); + + return NULL; + } // endif's type + + return filp; +} // end of CondFilter /***********************************************************************/ /* Check the WHERE condition and return a MYSQL/ODBC/WQL filter. 
*/ @@ -1901,7 +2295,7 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, AMT tty, Item *cond) case Item_func::GE_FUNC: vop= OP_GE; break; case Item_func::GT_FUNC: vop= OP_GT; break; case Item_func::IN_FUNC: vop= OP_IN; - case Item_func::BETWEEN: + case Item_func::BETWEEN: ismul= true; neg= ((Item_func_opt_neg *)condf)->negated; break; @@ -2071,35 +2465,36 @@ const COND *ha_connect::cond_push(const COND *cond) DBUG_ENTER("ha_connect::cond_push"); if (tdbp) { - AMT tty= tdbp->GetAmType(); - bool x= (tty == TYPE_AM_MYX || tty == TYPE_AM_XDBC); - bool b= (tty == TYPE_AM_WMI || tty == TYPE_AM_ODBC || - tty == TYPE_AM_TBL || tty == TYPE_AM_MYSQL || - tty == TYPE_AM_PLG || x); + PGLOBAL& g= xp->g; + AMT tty= tdbp->GetAmType(); + bool x= (tty == TYPE_AM_MYX || tty == TYPE_AM_XDBC); + bool b= (tty == TYPE_AM_WMI || tty == TYPE_AM_ODBC || + tty == TYPE_AM_TBL || tty == TYPE_AM_MYSQL || + tty == TYPE_AM_PLG || x); if (b) { - PGLOBAL& g= xp->g; PCFIL filp= (PCFIL)PlugSubAlloc(g, NULL, sizeof(CONDFIL)); filp->Body= (char*)PlugSubAlloc(g, NULL, (x) ? 128 : 0); *filp->Body= 0; filp->Op= OP_XX; filp->Cmds= NULL; - + if (CheckCond(g, filp, tty, (Item *)cond)) { if (xtrace) htrc("cond_push: %s\n", filp->Body); - + if (!x) PlugSubAlloc(g, NULL, strlen(filp->Body) + 1); else cond= NULL; // Does this work? 
- + tdbp->SetCondFil(filp); } else if (x && cond) tdbp->SetCondFil(filp); // Wrong filter - } // endif b + } else + tdbp->SetFilter(CondFilter(g, (Item *)cond)); } // endif tdbp @@ -2116,7 +2511,7 @@ ha_rows ha_connect::records() if (!valid_info) info(HA_STATUS_VARIABLE); - if (tdbp && tdbp->Cardinality(NULL)) + if (tdbp) return stats.records; else return HA_POS_ERROR; @@ -2158,6 +2553,21 @@ bool ha_connect::get_error_message(int error, String* buf) DBUG_RETURN(false); } // end of get_error_message +/** + Convert a filename partition name to system +*/ +static char *decode(PGLOBAL g, const char *pn) + { + char *buf= (char*)PlugSubAlloc(g, NULL, strlen(pn) + 1); + uint dummy_errors; + uint32 len= copy_and_convert(buf, strlen(pn) + 1, + system_charset_info, + pn, strlen(pn), + &my_charset_filename, + &dummy_errors); + buf[len]= '\0'; + return buf; + } // end of decode /** @brief @@ -2197,9 +2607,29 @@ int ha_connect::open(const char *name, int mode, uint test_if_locked) PGLOBAL g= (xp) ? xp->g : NULL; // Try to set the database environment - if (g) + if (g) { rc= (CntCheckDB(g, this, name)) ? 
(-2) : 0; - else + + if (g->Mrr) { + // This should only happen for the mrr secondary handler + mrr= true; + g->Mrr= false; + } else + mrr= false; + +#if defined(WITH_PARTITION_STORAGE_ENGINE) + if (table->part_info) { + if (GetStringOption("Filename") || GetStringOption("Tabname") + || GetStringOption("Connect")) { + strcpy(partname, decode(g, strrchr(name, '#') + 1)); +// strcpy(partname, table->part_info->curr_part_elem->partition_name); + part_id= &table->part_info->full_part_field_set; + } else // Inward table + strcpy(partname, strrchr(name, slash) + 1); + part_id= &table->part_info->full_part_field_set; // Temporary + } // endif part_info +#endif // WITH_PARTITION_STORAGE_ENGINE + } else rc= HA_ERR_INTERNAL_ERROR; DBUG_RETURN(rc); @@ -2212,6 +2642,7 @@ int ha_connect::open(const char *name, int mode, uint test_if_locked) int ha_connect::optimize(THD* thd, HA_CHECK_OPT* check_opt) { int rc= 0; + bool dop= (check_opt != NULL); PGLOBAL& g= xp->g; PDBUSER dup= PlgGetUser(g); @@ -2221,20 +2652,16 @@ int ha_connect::optimize(THD* thd, HA_CHECK_OPT* check_opt) dup->Check |= CHK_OPT; if (tdbp) { - if (((PTDBASE)tdbp)->GetDef()->Indexable() == 2) { - // Nothing to do for remote index - } else if (!((PTDBASE)tdbp)->GetDef()->Indexable()) { - sprintf(g->Message, "optimize: Table %s is not indexable", tdbp->GetName()); - my_message(ER_INDEX_REBUILD, g->Message, MYF(0)); - rc= HA_ERR_UNSUPPORTED; - } else if ((rc= ((PTDBASE)tdbp)->ResetTableOpt(g, true))) { + bool b= (((PTDBASE)tdbp)->GetDef()->Indexable() == 1); + + if ((rc= ((PTDBASE)tdbp)->ResetTableOpt(g, dop, b))) { if (rc == RC_INFO) { push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); rc= 0; } else rc= HA_ERR_INTERNAL_ERROR; - } // endif's + } // endif rc } else rc= HA_ERR_INTERNAL_ERROR; @@ -2306,8 +2733,13 @@ int ha_connect::write_row(uchar *buf) DBUG_ENTER("ha_connect::write_row"); // This is not tested yet - if (xmod == MODE_ALTER) + if (xmod == MODE_ALTER) { + if (IsPartitioned() && 
GetStringOption("Filename", NULL)) + // Why does this happen now that check_if_supported_inplace_alter is called? + DBUG_RETURN(0); // Alter table on an outward partition table + xmod= MODE_INSERT; + } // endif xmod // Open the table if it was not opened yet (locked) if (!IsOpened() || xmod != tdbp->GetMode()) { @@ -2380,7 +2812,7 @@ int ha_connect::update_row(const uchar *old_data, uchar *new_data) // Check values for possible change in indexed column if ((rc= CheckRecord(g, old_data, new_data))) - return rc; + DBUG_RETURN(rc); if (CntUpdateRow(g, tdbp)) { DBUG_PRINT("update_row", ("%s", g->Message)); @@ -2438,12 +2870,19 @@ int ha_connect::index_init(uint idx, bool sorted) htrc("index_init: this=%p idx=%u sorted=%d\n", this, idx, sorted); if (GetIndexType(GetRealType()) == 2) { - // This is a remote index - xmod= MODE_READX; + if (xmod == MODE_READ) + // This is a remote index + xmod= MODE_READX; if (!(rc= rnd_init(0))) { - active_index= idx; - indexing= 2; // TO DO: mul? +// if (xmod == MODE_READX) { + active_index= idx; + indexing= IsUnique(idx) ? 
1 : 2; +// } else { +// active_index= MAX_KEY; +// indexing= 0; +// } // endif xmod + } //endif rc DBUG_RETURN(rc); @@ -2466,7 +2905,7 @@ int ha_connect::index_init(uint idx, bool sorted) htrc("index_init CONNECT: %s\n", g->Message); active_index= MAX_KEY; rc= HA_ERR_INTERNAL_ERROR; - } else { + } else if (((PTDBDOX)tdbp)->To_Kindex) { if (((PTDBDOX)tdbp)->To_Kindex->GetNum_K()) { if (((PTDBASE)tdbp)->GetFtype() != RECFM_NAF) ((PTDBDOX)tdbp)->GetTxfp()->ResetBuffer(g); @@ -2479,7 +2918,7 @@ int ha_connect::index_init(uint idx, bool sorted) } // endif indexing if (xtrace) - htrc("index_init: rc=%d indexing=%d active_index=%d\n", + htrc("index_init: rc=%d indexing=%d active_index=%d\n", rc, indexing, active_index); DBUG_RETURN(rc); @@ -2492,6 +2931,7 @@ int ha_connect::index_end() { DBUG_ENTER("index_end"); active_index= MAX_KEY; + ds_mrr.dsmrr_close(); DBUG_RETURN(rnd_end()); } // end of index_end @@ -2505,7 +2945,7 @@ int ha_connect::ReadIndexed(uchar *buf, OPVAL op, const uchar *key, uint key_len //statistic_increment(ha_read_key_count, &LOCK_status); - switch (CntIndexRead(xp->g, tdbp, op, key, (int)key_len)) { + switch (CntIndexRead(xp->g, tdbp, op, key, (int)key_len, mrr)) { case RC_OK: xp->fnd++; rc= MakeRecord((char*)buf); @@ -2564,16 +3004,22 @@ int ha_connect::index_read(uchar * buf, const uchar * key, uint key_len, case HA_READ_KEY_EXACT: op= OP_EQ; break; case HA_READ_AFTER_KEY: op= OP_GT; break; case HA_READ_KEY_OR_NEXT: op= OP_GE; break; - default: DBUG_RETURN(-1); break; + default: DBUG_RETURN(-1); break; } // endswitch find_flag if (xtrace > 1) htrc("%p index_read: op=%d\n", this, op); - if (indexing > 0) + if (indexing > 0) { rc= ReadIndexed(buf, op, key, key_len); - else - rc= HA_ERR_INTERNAL_ERROR; + + if (rc == HA_ERR_INTERNAL_ERROR) { + nox= true; // To block making indexes + abort= true; // Don't rename temp file + } // endif rc + + } else + rc= HA_ERR_INTERNAL_ERROR; // HA_ERR_KEY_NOT_FOUND ? 
DBUG_RETURN(rc); } // end of index_read @@ -2668,7 +3114,7 @@ int ha_connect::index_last(uchar *buf) rc= ReadIndexed(buf, OP_LAST); DBUG_RETURN(rc); -} // end of index_last +} /****************************************************************************/ @@ -2720,7 +3166,7 @@ int ha_connect::rnd_init(bool scan) } // endif xmod if (xtrace) - htrc("rnd_init: this=%p scan=%d xmod=%d alter=%d\n", + htrc("rnd_init: this=%p scan=%d xmod=%d alter=%d\n", this, scan, xmod, alter); if (!g || !table || xmod == MODE_INSERT) @@ -2728,7 +3174,11 @@ int ha_connect::rnd_init(bool scan) // Do not close the table if it was opened yet (locked?) if (IsOpened()) { - if (tdbp->OpenDB(g)) // Rewind table + if (IsPartitioned() && xmod != MODE_INSERT) + if (CheckColumnList(g)) // map can have been changed + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + + if (tdbp->OpenDB(g)) // Rewind table DBUG_RETURN(HA_ERR_INTERNAL_ERROR); else DBUG_RETURN(0); @@ -2737,7 +3187,7 @@ int ha_connect::rnd_init(bool scan) tdbp= NULL; // Not valid anymore // When updating, to avoid skipped update, force the table - // handler to retrieve write-only fields to be able to compare + // handler to retrieve write-only fields to be able to compare // records and detect data change. 
if (xmod == MODE_UPDATE) bitmap_union(table->read_set, table->write_set); @@ -2770,6 +3220,7 @@ int ha_connect::rnd_end() // if (tdbp && xp->last_query_id == valid_query_id) // rc= CloseTable(xp->g); + ds_mrr.dsmrr_close(); DBUG_RETURN(rc); } // end of rnd_end @@ -2864,6 +3315,10 @@ void ha_connect::position(const uchar *record) DBUG_ENTER("ha_connect::position"); //if (((PTDBASE)tdbp)->GetDef()->Indexable()) my_store_ptr(ref, ref_length, (my_off_t)((PTDBASE)tdbp)->GetRecpos()); + + if (trace) + htrc("position: pos=%d\n", ((PTDBASE)tdbp)->GetRecpos()); + DBUG_VOID_RETURN; } // end of position @@ -2890,9 +3345,13 @@ int ha_connect::rnd_pos(uchar *buf, uchar *pos) PTDBASE tp= (PTDBASE)tdbp; DBUG_ENTER("ha_connect::rnd_pos"); - if (!tp->SetRecpos(xp->g, (int)my_get_ptr(pos, ref_length))) + if (!tp->SetRecpos(xp->g, (int)my_get_ptr(pos, ref_length))) { + if (trace) + htrc("rnd_pos: %d\n", tp->GetRecpos()); + + tp->SetFilter(NULL); rc= rnd_next(buf); - else + } else rc= HA_ERR_KEY_NOT_FOUND; DBUG_RETURN(rc); @@ -2947,27 +3406,30 @@ int ha_connect::info(uint flag) if (xtrace) htrc("%p In info: flag=%u valid_info=%d\n", this, flag, valid_info); - if (!valid_info) { - // tdbp must be available to get updated info - if (xp->CheckQuery(valid_query_id) || !tdbp) { - PDBUSER dup= PlgGetUser(g); - PCATLG cat= (dup) ? dup->Catalog : NULL; - - if (xmod == MODE_ANY || xmod == MODE_ALTER) { - // Pure info, not a query - pure= true; - xp->CheckCleanup(); - } // endif xmod - - // This is necessary for getting file length - if (cat && table) - cat->SetDataPath(g, table->s->db.str); - else - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); // Should never happen + // tdbp must be available to get updated info + if (xp->CheckQuery(valid_query_id) || !tdbp) { + PDBUSER dup= PlgGetUser(g); + PCATLG cat= (dup) ? 
dup->Catalog : NULL; - tdbp= GetTDB(g); - } // endif tdbp + if (xmod == MODE_ANY || xmod == MODE_ALTER) { + // Pure info, not a query + pure= true; + xp->CheckCleanup(); + } // endif xmod + // This is necessary for getting file length + if (cat && table) + cat->SetDataPath(g, table->s->db.str); + else + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); // Should never happen + + if (!(tdbp= GetTDB(g))) + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); // Should never happen + + valid_info = false; + } // endif tdbp + + if (!valid_info) { valid_info= CntInfo(g, tdbp, &xinfo); if (((signed)xinfo.records) < 0) @@ -3109,15 +3571,15 @@ bool ha_connect::check_privileges(THD *thd, PTOS options, char *dbn) if (options->filename && *options->filename) { char *s, path[FN_REFLEN], dbpath[FN_REFLEN]; #if defined(WIN32) - s= "\\"; + s= "\\"; #else // !WIN32 - s= "/"; + s= "/"; #endif // !WIN32 strcpy(dbpath, mysql_real_data_home); - + if (db) strcat(strcat(dbpath, db), s); - + (void) fn_format(path, options->filename, dbpath, "", MY_RELATIVE_PATH | MY_UNPACK_FILENAME); @@ -3151,7 +3613,7 @@ bool ha_connect::check_privileges(THD *thd, PTOS options, char *dbn) return true; } // end of check_privileges -// Check that two indexes are equivalent +// Check that two indexes are equivalent bool ha_connect::IsSameIndex(PIXDEF xp1, PIXDEF xp2) { bool b= true; @@ -3176,7 +3638,7 @@ bool ha_connect::IsSameIndex(PIXDEF xp1, PIXDEF xp2) return b; } // end of IsSameIndex -MODE ha_connect::CheckMode(PGLOBAL g, THD *thd, +MODE ha_connect::CheckMode(PGLOBAL g, THD *thd, MODE newmode, bool *chk, bool *cras) { if ((trace= xtrace)) { @@ -3219,11 +3681,6 @@ MODE ha_connect::CheckMode(PGLOBAL g, THD *thd, case SQLCOM_RENAME_TABLE: newmode= MODE_ANY; break; - case SQLCOM_DROP_INDEX: - case SQLCOM_CREATE_INDEX: - newmode= MODE_ANY; -// stop= true; - break; case SQLCOM_CREATE_VIEW: case SQLCOM_DROP_VIEW: newmode= MODE_ANY; @@ -3231,6 +3688,13 @@ MODE ha_connect::CheckMode(PGLOBAL g, THD *thd, case SQLCOM_ALTER_TABLE: newmode= 
MODE_ALTER; break; + case SQLCOM_DROP_INDEX: + case SQLCOM_CREATE_INDEX: +// if (!IsPartitioned()) { + newmode= MODE_ANY; + break; +// } // endif partitioned + default: htrc("Unsupported sql_command=%d\n", thd_sql_command(thd)); strcpy(g->Message, "CONNECT Unsupported command"); @@ -3260,10 +3724,6 @@ MODE ha_connect::CheckMode(PGLOBAL g, THD *thd, case SQLCOM_LOCK_TABLES: locked= 1; break; - case SQLCOM_DROP_INDEX: - case SQLCOM_CREATE_INDEX: - *chk= true; -// stop= true; case SQLCOM_DROP_TABLE: case SQLCOM_RENAME_TABLE: newmode= MODE_ANY; @@ -3276,6 +3736,14 @@ MODE ha_connect::CheckMode(PGLOBAL g, THD *thd, *chk= true; newmode= MODE_ALTER; break; + case SQLCOM_DROP_INDEX: + case SQLCOM_CREATE_INDEX: +// if (!IsPartitioned()) { + *chk= true; + newmode= MODE_ANY; + break; +// } // endif partitioned + default: htrc("Unsupported sql_command=%d\n", thd_sql_command(thd)); strcpy(g->Message, "CONNECT Unsupported command"); @@ -3364,7 +3832,7 @@ int ha_connect::external_lock(THD *thd, int lock_type) if (xtrace) htrc("external_lock: this=%p thd=%p xp=%p g=%p lock_type=%d\n", this, thd, xp, g, lock_type); - + if (!g) DBUG_RETURN(HA_ERR_INTERNAL_ERROR); @@ -3392,7 +3860,7 @@ int ha_connect::external_lock(THD *thd, int lock_type) sprintf(g->Message, "external_lock: unexpected command %d", sqlcom); push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); DBUG_RETURN(0); - } else if (g->Xchk) { + } else if (g->Xchk) { if (!tdbp) { if (!(tdbp= GetTDB(g))) DBUG_RETURN(HA_ERR_INTERNAL_ERROR); @@ -3496,6 +3964,7 @@ int ha_connect::external_lock(THD *thd, int lock_type) } // endif Close locked= 0; + xmod= MODE_ANY; // For info commands DBUG_RETURN(rc); } // endif MODE_ANY @@ -3529,7 +3998,7 @@ int ha_connect::external_lock(THD *thd, int lock_type) } // endif Xchk } else - g->Xchk= NULL; + g->Xchk= NULL; #endif // 0 if (cras) @@ -3653,11 +4122,6 @@ filename_to_dbname_and_tablename(const char *filename, char *database, size_t database_size, char *table, size_t table_size) { 
-#if defined(WIN32) - char slash= '\\'; -#else // !WIN32 - char slash= '/'; -#endif // !WIN32 LEX_CSTRING d, t; size_t length= strlen(filename); @@ -3682,10 +4146,10 @@ filename_to_dbname_and_tablename(const char *filename, /** @brief Used to delete or rename a table. By the time delete_table() has been - called all opened references to this table will have been closed + called all opened references to this table will have been closed (and your globally shared references released) ===> too bad!!! The variable name will just be the name of the table. - You will need to remove or rename any files you have created at + You will need to remove or rename any files you have created at this point. @details @@ -3711,7 +4175,7 @@ int ha_connect::delete_or_rename_table(const char *name, const char *to) if (xtrace) { if (to) - htrc("rename_table: this=%p thd=%p sqlcom=%d from=%s to=%s\n", + htrc("rename_table: this=%p thd=%p sqlcom=%d from=%s to=%s\n", this, thd, sqlcom, name, to); else htrc("delete_table: this=%p thd=%p sqlcom=%d name=%s\n", @@ -3732,29 +4196,27 @@ int ha_connect::delete_or_rename_table(const char *name, const char *to) // If a temporary file exists, all the tests below were passed // successfully when making it, so they are not needed anymore // in particular because they sometimes cause DBUG_ASSERT crash. - if (*tabname != '#') { + // Also, for partitioned tables, no test can be done because when + // this function is called, the .par file is already deleted and + // this causes the open_table_def function to fail. + // Not having any other clues (table and table_share are NULL) + // the only mean we have to test for partitioning is this: + if (*tabname != '#' && !strstr(tabname, "#P#")) { // We have to retrieve the information about this table options. 
ha_table_option_struct *pos; char key[MAX_DBKEY_LENGTH]; uint key_length; TABLE_SHARE *share; +// if ((p= strstr(tabname, "#P#"))) won't work, see above +// *p= 0; // Get the main the table name + key_length= tdc_create_key(key, db, tabname); // share contains the option struct that we need if (!(share= alloc_table_share(db, tabname, key, key_length))) DBUG_RETURN(rc); -#if 0 - if (*tabname == '#') { - // These are in ???? charset after renaming - char *p= strchr(share->path.str, '@'); - strcpy(p, share->table_name.str); - share->path.length= strlen(share->path.str); - share->normalized_path.length= share->path.length; - } // endif tabname -#endif // 0 - // Get the share info from the .frm file if (!open_table_def(thd, share)) { // Now we can work @@ -3764,7 +4226,7 @@ int ha_connect::delete_or_rename_table(const char *name, const char *to) else if (IsFileType(GetRealType(pos)) && !pos->filename) ok= true; - + } // endif pos } else // Avoid infamous DBUG_ASSERT @@ -3843,10 +4305,10 @@ ha_rows ha_connect::records_in_range(uint inx, key_range *min_key, else rows= (ha_rows)nval; - } else if (indexing < 0) - rows= HA_POS_ERROR; - else + } else if (indexing == 0) rows= 100000000; // Don't use missing index + else + rows= HA_POS_ERROR; DBUG_RETURN(rows); } // end of records_in_range @@ -3854,7 +4316,7 @@ ha_rows ha_connect::records_in_range(uint inx, key_range *min_key, /** Convert an ISO-8859-1 column name to UTF-8 */ -static char *encode(PGLOBAL g, char *cnm) +static char *encode(PGLOBAL g, const char *cnm) { char *buf= (char*)PlugSubAlloc(g, NULL, strlen(cnm) * 3); uint dummy_errors; @@ -3865,7 +4327,7 @@ static char *encode(PGLOBAL g, char *cnm) &dummy_errors); buf[len]= '\0'; return buf; - } // end of Encode + } // end of encode /** Store field definition for create. 
@@ -3873,6 +4335,83 @@ static char *encode(PGLOBAL g, char *cnm) @return Return 0 if ok */ +#if defined(NEW_WAY) +static bool add_fields(PGLOBAL g, + THD *thd, + Alter_info *alter_info, + char *name, + int typ, int len, int dec, + uint type_modifier, + char *rem, +// CHARSET_INFO *cs, +// void *vcolinfo, +// engine_option_value *create_options, + int flg, + bool dbf, + char v) +{ + register Create_field *new_field; + char *length, *decimals= NULL; + enum_field_types type; +//Virtual_column_info *vcol_info= (Virtual_column_info *)vcolinfo; + engine_option_value *crop; + LEX_STRING *comment; + LEX_STRING *field_name; + + DBUG_ENTER("ha_connect::add_fields"); + + if (len) { + if (!v && typ == TYPE_STRING && len > 255) + v= 'V'; // Change CHAR to VARCHAR + + length= (char*)PlugSubAlloc(g, NULL, 8); + sprintf(length, "%d", len); + + if (typ == TYPE_DOUBLE) { + decimals= (char*)PlugSubAlloc(g, NULL, 8); + sprintf(decimals, "%d", min(dec, (min(len, 31) - 1))); + } // endif dec + + } else + length= NULL; + + if (!rem) + rem= ""; + + type= PLGtoMYSQL(typ, dbf, v); + comment= thd->make_lex_string(rem, strlen(rem)); + field_name= thd->make_lex_string(name, strlen(name)); + + switch (v) { + case 'Z': type_modifier|= ZEROFILL_FLAG; + case 'U': type_modifier|= UNSIGNED_FLAG; break; + } // endswitch v + + if (flg) { + engine_option_value *start= NULL, *end= NULL; + LEX_STRING *flag= thd->make_lex_string("flag", 4); + + crop= new(thd->mem_root) engine_option_value(*flag, (ulonglong)flg, + &start, &end, thd->mem_root); + } else + crop= NULL; + + if (check_string_char_length(field_name, "", NAME_CHAR_LEN, + system_charset_info, 1)) { + my_error(ER_TOO_LONG_IDENT, MYF(0), field_name->str); /* purecov: inspected */ + DBUG_RETURN(1); /* purecov: inspected */ + } // endif field_name + + if (!(new_field= new Create_field()) || + new_field->init(thd, field_name->str, type, length, decimals, + type_modifier, NULL, NULL, comment, NULL, + NULL, NULL, 0, NULL, crop, true)) + DBUG_RETURN(1); + 
+ alter_info->create_list.push_back(new_field); + DBUG_RETURN(0); +} // end of add_fields +#else // !NEW_WAY static bool add_field(String *sql, const char *field_name, int typ, int len, int dec, uint tm, const char *rem, char *dft, char *xtra, int flag, bool dbf, char v) @@ -3902,7 +4441,7 @@ static bool add_field(String *sql, const char *field_name, int typ, error|= sql->append(')'); } // endif len - + if (v == 'U') error|= sql->append(" UNSIGNED"); else if (v == 'Z') @@ -3942,6 +4481,7 @@ static bool add_field(String *sql, const char *field_name, int typ, error|= sql->append(','); return error; } // end of add_field +#endif // !NEW_WAY /** Initialise the table share with the new columns. @@ -3949,8 +4489,112 @@ static bool add_field(String *sql, const char *field_name, int typ, @return Return 0 if ok */ -static int init_table_share(THD* thd, - TABLE_SHARE *table_s, +#if defined(NEW_WAY) +//static bool sql_unusable_for_discovery(THD *thd, const char *sql); + +static int init_table_share(THD *thd, + TABLE_SHARE *table_s, + HA_CREATE_INFO *create_info, + Alter_info *alter_info) +{ + KEY *not_used_1; + uint not_used_2; + int rc= 0; + handler *file; + LEX_CUSTRING frm= {0,0}; + + DBUG_ENTER("init_table_share"); + +#if 0 + ulonglong saved_mode= thd->variables.sql_mode; + CHARSET_INFO *old_cs= thd->variables.character_set_client; + Parser_state parser_state; + char *sql_copy; + LEX *old_lex; + Query_arena *arena, backup; + LEX tmp_lex; + + /* + Ouch. Parser may *change* the string it's working on. + Currently (2013-02-26) it is used to permanently disable + conditional comments. + Anyway, let's copy the caller's string... 
+ */ + if (!(sql_copy= thd->strmake(sql, sql_length))) + DBUG_RETURN(HA_ERR_OUT_OF_MEM); + + if (parser_state.init(thd, sql_copy, sql_length)) + DBUG_RETURN(HA_ERR_OUT_OF_MEM); + + thd->variables.sql_mode= MODE_NO_ENGINE_SUBSTITUTION | MODE_NO_DIR_IN_CREATE; + thd->variables.character_set_client= system_charset_info; + old_lex= thd->lex; + thd->lex= &tmp_lex; + + arena= thd->stmt_arena; + + if (arena->is_conventional()) + arena= 0; + else + thd->set_n_backup_active_arena(arena, &backup); + + lex_start(thd); + + if ((error= parse_sql(thd, & parser_state, NULL))) + goto ret; + + if (table_s->sql_unusable_for_discovery(thd, NULL)) { + my_error(ER_SQL_DISCOVER_ERROR, MYF(0), plugin_name(db_plugin)->str, + db.str, table_name.str, sql_copy); + goto ret; + } // endif unusable + + thd->lex->create_info.db_type= plugin_data(db_plugin, handlerton *); + + if (tabledef_version.str) + thd->lex->create_info.tabledef_version= tabledef_version; +#endif // 0 + + tmp_disable_binlog(thd); + + file= mysql_create_frm_image(thd, table_s->db.str, table_s->table_name.str, + create_info, alter_info, C_ORDINARY_CREATE, + ¬_used_1, ¬_used_2, &frm); + if (file) + delete file; + else + rc= OPEN_FRM_CORRUPTED; + + if (!rc && frm.str) { + table_s->option_list= 0; // cleanup existing options ... + table_s->option_struct= 0; // ... 
if it's an assisted discovery + rc= table_s->init_from_binary_frm_image(thd, true, frm.str, frm.length); + } // endif frm + +//ret: + my_free(const_cast<uchar*>(frm.str)); + reenable_binlog(thd); +#if 0 + lex_end(thd->lex); + thd->lex= old_lex; + if (arena) + thd->restore_active_arena(arena, &backup); + thd->variables.sql_mode= saved_mode; + thd->variables.character_set_client= old_cs; +#endif // 0 + + if (thd->is_error() || rc) { + thd->clear_error(); + my_error(ER_NO_SUCH_TABLE, MYF(0), table_s->db.str, + table_s->table_name.str); + DBUG_RETURN(HA_ERR_NOT_A_TABLE); + } else + DBUG_RETURN(0); + +} // end of init_table_share +#else // !NEW_WAY +static int init_table_share(THD* thd, + TABLE_SHARE *table_s, HA_CREATE_INFO *create_info, // char *dsn, String *sql) @@ -4040,9 +4684,26 @@ static int init_table_share(THD* thd, return table_s->init_from_sql_statement_string(thd, true, sql->ptr(), sql->length()); } // end of init_table_share +#endif // !NEW_WAY + +// Add an option to the create_info option list +static void add_option(THD* thd, HA_CREATE_INFO *create_info, + const char *opname, const char *opval) +{ +#if defined(NEW_WAY) + LEX_STRING *opn= thd->make_lex_string(opname, strlen(opname)); + LEX_STRING *val= thd->make_lex_string(opval, strlen(opval)); + engine_option_value *pov, **start= &create_info->option_list, *end= NULL; + + for (pov= *start; pov; pov= pov->next) + end= pov; + + pov= new(thd->mem_root) engine_option_value(*opn, *val, false, start, &end); +#endif // NEW_WAY +} // end of add_option // Used to check whether a MYSQL table is created on itself -static bool CheckSelf(PGLOBAL g, TABLE_SHARE *s, const char *host, +bool CheckSelf(PGLOBAL g, TABLE_SHARE *s, const char *host, const char *db, char *tab, const char *src, int port) { if (src) @@ -4096,10 +4757,15 @@ static int connect_assisted_discovery(handlerton *hton, THD* thd, PDBUSER dup= PlgGetUser(g); PCATLG cat= (dup) ? 
dup->Catalog : NULL; PTOS topt= table_s->option_struct; +#if defined(NEW_WAY) +//CHARSET_INFO *cs; + Alter_info alter_info; +#else // !NEW_WAY char buf[1024]; String sql(buf, sizeof(buf), system_charset_info); sql.copy(STRING_WITH_LEN("CREATE TABLE whatever ("), system_charset_info); +#endif // !NEW_WAY if (!g) return HA_ERR_INTERNAL_ERROR; @@ -4159,6 +4825,7 @@ static int connect_assisted_discovery(handlerton *hton, THD* thd, ttp= GetTypeID(topt->type); sprintf(g->Message, "No table_type. Was set to %s", topt->type); push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); + add_option(thd, create_info, "table_type", topt->type); } else if (ttp == TAB_NIY) { sprintf(g->Message, "Unsupported table type %s", topt->type); my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); @@ -4191,6 +4858,9 @@ static int connect_assisted_discovery(handlerton *hton, THD* thd, } else if (ttp != TAB_ODBC || !(fnc & (FNC_TABLE | FNC_COL))) tab= table_s->table_name.str; // Default value +#if defined(NEW_WAY) +// add_option(thd, create_info, "tabname", tab); +#endif // NEW_WAY } // endif tab switch (ttp) { @@ -4342,7 +5012,7 @@ static int connect_assisted_discovery(handlerton *hton, THD* thd, if (src) { qrp= ODBCSrcCols(g, dsn, (char*)src); src= NULL; // for next tests - } else + } else qrp= ODBCColumns(g, dsn, shm, tab, NULL, mxr, fnc == FNC_COL); break; @@ -4364,7 +5034,7 @@ static int connect_assisted_discovery(handlerton *hton, THD* thd, #endif // ODBC_SUPPORT #if defined(MYSQL_SUPPORT) case TAB_MYSQL: - qrp= MyColumns(g, thd, host, db, user, pwd, tab, + qrp= MyColumns(g, thd, host, db, user, pwd, tab, NULL, port, fnc == FNC_COL); break; #endif // MYSQL_SUPPORT @@ -4418,15 +5088,19 @@ static int connect_assisted_discovery(handlerton *hton, THD* thd, dec= crp->Prec; flg= crp->Flag; v= crp->Var; - + if (!len && typ == TYPE_STRING) len= 256; // STRBLK's have 0 length // Now add the field +#if defined(NEW_WAY) + rc= add_fields(g, thd, &alter_info, cnm, typ, len, dec, + 
NOT_NULL_FLAG, "", flg, dbf, v); +#else // !NEW_WAY if (add_field(&sql, cnm, typ, len, dec, NOT_NULL_FLAG, NULL, NULL, NULL, flg, dbf, v)) rc= HA_ERR_OUT_OF_MEM; - +#endif // !NEW_WAY } // endfor crp } else { @@ -4446,7 +5120,12 @@ static int connect_assisted_discovery(handlerton *hton, THD* thd, tm= NOT_NULL_FLAG; cnm= (char*)"noname"; dft= xtra= NULL; +#if defined(NEW_WAY) + rem= ""; +// cs= NULL; +#else // !NEW_WAY rem= NULL; +#endif // !NEW_WAY for (crp= qrp->Colresp; crp; crp= crp->Next) switch (crp->Fld) { @@ -4475,7 +5154,7 @@ static int connect_assisted_discovery(handlerton *hton, THD* thd, case FLD_REM: rem= crp->Kdata->GetCharValue(i); break; -// case FLD_CHARSET: +// case FLD_CHARSET: // No good because remote table is already translated // if (*(csn= crp->Kdata->GetCharValue(i))) // cs= get_charset_by_name(csn, 0); @@ -4528,16 +5207,25 @@ static int connect_assisted_discovery(handlerton *hton, THD* thd, prec= len; // Now add the field +#if defined(NEW_WAY) + rc= add_fields(g, thd, &alter_info, cnm, typ, prec, dec, + tm, rem, 0, dbf, v); +#else // !NEW_WAY if (add_field(&sql, cnm, typ, prec, dec, tm, rem, dft, xtra, 0, dbf, v)) rc= HA_ERR_OUT_OF_MEM; +#endif // !NEW_WAY } // endfor i } // endif fnc +#if defined(NEW_WAY) + rc= init_table_share(thd, table_s, create_info, &alter_info); +#else // !NEW_WAY if (!rc) rc= init_table_share(thd, table_s, create_info, &sql); // rc= init_table_share(thd, table_s, create_info, dsn, &sql); +#endif // !NEW_WAY return rc; } // endif ok @@ -4597,12 +5285,15 @@ int ha_connect::create(const char *name, TABLE *table_arg, HA_CREATE_INFO *create_info) { int rc= RC_OK; - bool dbf; + bool dbf, inward; Field* *field; Field *fp; TABTYPE type; TABLE *st= table; // Probably unuseful - THD *thd= ha_thd(); + THD *thd= ha_thd(); +#if defined(WITH_PARTITION_STORAGE_ENGINE) + partition_info *part_info= table_arg->part_info; +#endif // WITH_PARTITION_STORAGE_ENGINE xp= GetUser(thd, xp); PGLOBAL g= xp->g; @@ -4622,7 +5313,7 @@ int 
ha_connect::create(const char *name, TABLE *table_arg, // Check table type if (type == TAB_UNDEF) { - options->type= (options->srcdef) ? "MYSQL" : + options->type= (options->srcdef) ? "MYSQL" : (options->tabname) ? "PROXY" : "DOS"; type= GetTypeID(options->type); sprintf(g->Message, "No table_type. Will be set to %s", options->type); @@ -4639,6 +5330,8 @@ int ha_connect::create(const char *name, TABLE *table_arg, if (check_privileges(thd, options, GetDBfromName(name))) DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + inward= IsFileType(type) && !options->filename; + if (options->data_charset) { const CHARSET_INFO *data_charset; @@ -4690,6 +5383,9 @@ int ha_connect::create(const char *name, TABLE *table_arg, } // endif tabname case TAB_MYSQL: +#if defined(WITH_PARTITION_STORAGE_ENGINE) + if (!part_info) +#endif // WITH_PARTITION_STORAGE_ENGINE {const char *src= options->srcdef; char *host, *db, *tab= (char*)options->tabname; int port; @@ -4733,7 +5429,7 @@ int ha_connect::create(const char *name, TABLE *table_arg, DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } // endif CheckSelf - }break; + }break; default: /* do nothing */; break; } // endswitch ttp @@ -4829,8 +5525,8 @@ int ha_connect::create(const char *name, TABLE *table_arg, sprintf(g->Message, "Unsupported 0 length for column %s", fp->field_name); rc= HA_ERR_INTERNAL_ERROR; - my_printf_error(ER_UNKNOWN_ERROR, - "Unsupported 0 length for column %s", + my_printf_error(ER_UNKNOWN_ERROR, + "Unsupported 0 length for column %s", MYF(0), fp->field_name); DBUG_RETURN(rc); } // endif fp @@ -4857,7 +5553,7 @@ int ha_connect::create(const char *name, TABLE *table_arg, } // endswitch type if ((fp)->real_maybe_null() && !IsTypeNullable(type)) { - my_printf_error(ER_UNKNOWN_ERROR, + my_printf_error(ER_UNKNOWN_ERROR, "Table type %s does not support nullable columns", MYF(0), options->type); DBUG_RETURN(HA_ERR_UNSUPPORTED); @@ -4883,8 +5579,7 @@ int ha_connect::create(const char *name, TABLE *table_arg, } // endfor field - if ((sqlcom == 
SQLCOM_CREATE_TABLE || *GetTableName() == '#') - && IsFileType(type) && !options->filename) { + if ((sqlcom == SQLCOM_CREATE_TABLE || *GetTableName() == '#') && inward) { // The file name is not specified, create a default file in // the database directory named table_name.table_type. // (temporarily not done for XML because a void file causes @@ -4892,8 +5587,6 @@ int ha_connect::create(const char *name, TABLE *table_arg, char buf[256], fn[_MAX_PATH], dbpath[128], lwt[12]; int h; - strcpy(buf, GetTableName()); - // Check for incompatible options if (options->sepindex) { my_message(ER_UNKNOWN_ERROR, @@ -4902,12 +5595,12 @@ int ha_connect::create(const char *name, TABLE *table_arg, DBUG_RETURN(HA_ERR_UNSUPPORTED); } else if (GetTypeID(options->type) == TAB_VEC) if (!table->s->max_rows || options->split) { - my_printf_error(ER_UNKNOWN_ERROR, + my_printf_error(ER_UNKNOWN_ERROR, "%s tables whose file name is unspecified cannot be split", MYF(0), options->type); DBUG_RETURN(HA_ERR_UNSUPPORTED); } else if (options->header == 2) { - my_printf_error(ER_UNKNOWN_ERROR, + my_printf_error(ER_UNKNOWN_ERROR, "header=2 is not allowed for %s tables whose file name is unspecified", MYF(0), options->type); DBUG_RETURN(HA_ERR_UNSUPPORTED); @@ -4920,16 +5613,31 @@ int ha_connect::create(const char *name, TABLE *table_arg, break; } else lwt[i]= tolower(options->type[i]); - - strcat(strcat(buf, "."), lwt); - sprintf(g->Message, "No file name. Table will use %s", buf); - if (sqlcom == SQLCOM_CREATE_TABLE) - push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); +#if defined(WITH_PARTITION_STORAGE_ENGINE) + if (part_info) { + char *p; + + strcpy(dbpath, name); + p= strrchr(dbpath, slash); + strcpy(partname, ++p); + strcat(strcat(strcpy(buf, p), "."), lwt); + *p= 0; + } else { +#endif // WITH_PARTITION_STORAGE_ENGINE + strcat(strcat(strcpy(buf, GetTableName()), "."), lwt); + sprintf(g->Message, "No file name. 
Table will use %s", buf); + + if (sqlcom == SQLCOM_CREATE_TABLE) + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); + + strcat(strcat(strcpy(dbpath, "./"), table->s->db.str), "/"); +#if defined(WITH_PARTITION_STORAGE_ENGINE) + } // endif part_info +#endif // WITH_PARTITION_STORAGE_ENGINE - strcat(strcat(strcpy(dbpath, "./"), table->s->db.str), "/"); PlugSetPath(fn, buf, dbpath); - + if ((h= ::open(fn, O_CREAT | O_EXCL, 0666)) == -1) { if (errno == EEXIST) sprintf(g->Message, "Default file %s already exists", fn); @@ -4944,32 +5652,47 @@ int ha_connect::create(const char *name, TABLE *table_arg, push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, "Congratulation, you just created a read-only void table!"); - } // endif + } // endif sqlcom if (xtrace) htrc("xchk=%p createas=%d\n", g->Xchk, g->Createas); - // To check whether indices have to be made or remade + // To check whether indexes have to be made or remade if (!g->Xchk) { PIXDEF xdp; - // We should be in CREATE TABLE or ALTER_TABLE - if (sqlcom != SQLCOM_CREATE_TABLE && sqlcom != SQLCOM_ALTER_TABLE) + // We should be in CREATE TABLE, ALTER_TABLE or CREATE INDEX + if (!(sqlcom == SQLCOM_CREATE_TABLE || sqlcom == SQLCOM_ALTER_TABLE || + sqlcom == SQLCOM_CREATE_INDEX || sqlcom == SQLCOM_DROP_INDEX)) +// (sqlcom == SQLCOM_CREATE_INDEX && part_info) || +// (sqlcom == SQLCOM_DROP_INDEX && part_info))) push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, - "Wrong command in create, please contact CONNECT team"); - - if (sqlcom == SQLCOM_ALTER_TABLE && g->Alchecked == 0 && - (!IsFileType(type) || FileExists(options->filename))) { - // This is an ALTER to CONNECT from another engine. - // It cannot be accepted because the table data would be lost - // except when the target file does not exist. - strcpy(g->Message, "Operation denied. 
Table data would be lost."); - my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + "Unexpected command in create, please contact CONNECT team"); + +#if defined(WITH_PARTITION_STORAGE_ENGINE) + if (part_info && !inward) + strcpy(partname, decode(g, strrchr(name, '#') + 1)); +// strcpy(partname, part_info->curr_part_elem->partition_name); +#endif // WITH_PARTITION_STORAGE_ENGINE + + if (g->Alchecked == 0 && + (!IsFileType(type) || FileExists(options->filename, false))) { + if (part_info) { + sprintf(g->Message, "Data repartition in %s is unchecked", partname); + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); + } else if (sqlcom == SQLCOM_ALTER_TABLE) { + // This is an ALTER to CONNECT from another engine. + // It cannot be accepted because the table data would be modified + // except when the target file does not exist. + strcpy(g->Message, "Operation denied. Table data would be modified."); + my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } // endif part_info + } // endif outward // Get the index definitions - if (xdp= GetIndexInfo()) { + if ((xdp= GetIndexInfo()) || sqlcom == SQLCOM_DROP_INDEX) { if (options->multiple) { strcpy(g->Message, "Multiple tables are not indexable"); my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); @@ -4981,17 +5704,23 @@ int ha_connect::create(const char *name, TABLE *table_arg, } else if (GetIndexType(type) == 1) { PDBUSER dup= PlgGetUser(g); PCATLG cat= (dup) ? dup->Catalog : NULL; - + if (cat) { cat->SetDataPath(g, table_arg->s->db.str); - + +#if defined(WITH_PARTITION_STORAGE_ENGINE) + if (part_info) + strcpy(partname, + decode(g, strrchr(name, (inward ? 
slash : '#')) + 1)); +#endif // WITH_PARTITION_STORAGE_ENGINE + if ((rc= optimize(table->in_use, NULL))) { htrc("Create rc=%d %s\n", rc, g->Message); my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); rc= HA_ERR_INTERNAL_ERROR; } else CloseTable(g); - + } // endif cat } else if (!GetIndexType(type)) { @@ -5019,13 +5748,16 @@ int ha_connect::create(const char *name, TABLE *table_arg, - file does not exist or is void - user has file privilege */ -bool ha_connect::FileExists(const char *fn) +bool ha_connect::FileExists(const char *fn, bool bf) { if (!fn || !*fn) return false; + else if (IsPartitioned() && bf) + return true; if (table) { - char *s, filename[_MAX_PATH], path[128]; + char *s, tfn[_MAX_PATH], filename[_MAX_PATH], path[128]; + bool b= false; int n; struct stat info; @@ -5034,13 +5766,22 @@ bool ha_connect::FileExists(const char *fn) return true; #if defined(WIN32) - s= "\\"; + s= "\\"; #else // !WIN32 - s= "/"; + s= "/"; #endif // !WIN32 + if (IsPartitioned()) { + sprintf(tfn, fn, GetPartName()); + + // This is to avoid an initialization error raised by the + // test on check_table_flags made in ha_partition::open + // that can fail if some partition files are empty. + b= true; + } else + strcpy(tfn, fn); strcat(strcat(strcat(strcpy(path, "."), s), table->s->db.str), s); - PlugSetPath(filename, fn, path); + PlugSetPath(filename, tfn, path); n= stat(filename, &info); if (n < 0) { @@ -5054,7 +5795,7 @@ bool ha_connect::FileExists(const char *fn) return false; } else - return (info.st_size) ? true : false; + return (info.st_size || b) ? 
true : false; } // endif table @@ -5177,7 +5918,7 @@ bool ha_connect::NoFieldOptionChange(TABLE *tab) */ enum_alter_inplace_result ha_connect::check_if_supported_inplace_alter(TABLE *altered_table, - Alter_inplace_info *ha_alter_info) + Alter_inplace_info *ha_alter_info) { DBUG_ENTER("check_if_supported_alter"); @@ -5214,11 +5955,11 @@ ha_connect::check_if_supported_inplace_alter(TABLE *altered_table, // Index operations Alter_inplace_info::HA_ALTER_FLAGS index_operations= - Alter_inplace_info::ADD_INDEX | + Alter_inplace_info::ADD_INDEX | Alter_inplace_info::DROP_INDEX | - Alter_inplace_info::ADD_UNIQUE_INDEX | + Alter_inplace_info::ADD_UNIQUE_INDEX | Alter_inplace_info::DROP_UNIQUE_INDEX | - Alter_inplace_info::ADD_PK_INDEX | + Alter_inplace_info::ADD_PK_INDEX | Alter_inplace_info::DROP_PK_INDEX; Alter_inplace_info::HA_ALTER_FLAGS inplace_offline_operations= @@ -5226,7 +5967,8 @@ ha_connect::check_if_supported_inplace_alter(TABLE *altered_table, Alter_inplace_info::ALTER_COLUMN_NAME | Alter_inplace_info::ALTER_COLUMN_DEFAULT | Alter_inplace_info::CHANGE_CREATE_OPTION | - Alter_inplace_info::ALTER_RENAME | index_operations; + Alter_inplace_info::ALTER_RENAME | + Alter_inplace_info::ALTER_PARTITIONED | index_operations; if (ha_alter_info->handler_flags & index_operations || !SameString(altered_table, "optname") || @@ -5280,7 +6022,7 @@ ha_connect::check_if_supported_inplace_alter(TABLE *altered_table, char *fn= GetStringOption("filename"); tshp= NULL; - if (FileExists(fn)) { + if (FileExists(fn, false)) { strcpy(g->Message, "Operation denied. 
Table data would be lost."); my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); DBUG_RETURN(HA_ALTER_ERROR); @@ -5324,7 +6066,7 @@ ha_connect::check_if_supported_inplace_alter(TABLE *altered_table, // This was in check_if_incompatible_data if (NoFieldOptionChange(altered_table) && - type == newtyp && + type == newtyp && SameInt(altered_table, "lrecl") && SameInt(altered_table, "elements") && SameInt(altered_table, "header") && @@ -5336,12 +6078,14 @@ ha_connect::check_if_supported_inplace_alter(TABLE *altered_table, fin: if (idx) { // Indexing is only supported inplace - my_message(ER_ALTER_OPERATION_NOT_SUPPORTED, + my_message(ER_ALTER_OPERATION_NOT_SUPPORTED, "Alter operations not supported together by CONNECT", MYF(0)); DBUG_RETURN(HA_ALTER_ERROR); } else if (outward) { - push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, - "This is an outward table, table data were not modified."); + if (IsFileType(type)) + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, + "This is an outward table, table data were not modified."); + DBUG_RETURN(HA_ALTER_INPLACE_EXCLUSIVE_LOCK); } else DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); @@ -5366,11 +6110,88 @@ bool ha_connect::check_if_incompatible_data(HA_CREATE_INFO *info, { DBUG_ENTER("ha_connect::check_if_incompatible_data"); // TO DO: really implement and check it. 
- push_warning(ha_thd(), Sql_condition::WARN_LEVEL_WARN, 0, + push_warning(ha_thd(), Sql_condition::WARN_LEVEL_WARN, 0, "Unexpected call to check_if_incompatible_data."); DBUG_RETURN(COMPATIBLE_DATA_NO); } // end of check_if_incompatible_data +/**************************************************************************** + * CONNECT MRR implementation: use DS-MRR + This is just copied from myisam + ***************************************************************************/ + +int ha_connect::multi_range_read_init(RANGE_SEQ_IF *seq, void *seq_init_param, + uint n_ranges, uint mode, + HANDLER_BUFFER *buf) +{ + return ds_mrr.dsmrr_init(this, seq, seq_init_param, n_ranges, mode, buf); +} // end of multi_range_read_init + +int ha_connect::multi_range_read_next(range_id_t *range_info) +{ + return ds_mrr.dsmrr_next(range_info); +} // end of multi_range_read_next + +ha_rows ha_connect::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq, + void *seq_init_param, + uint n_ranges, uint *bufsz, + uint *flags, Cost_estimate *cost) +{ + /* + This call is here because there is no location where this->table would + already be known. + TODO: consider moving it into some per-query initialization call. 
+ */ + ds_mrr.init(this, table); + + // MMR is implemented for "local" file based tables only + if (!IsFileType(GetRealType(GetTableOptionStruct()))) + *flags|= HA_MRR_USE_DEFAULT_IMPL; + + ha_rows rows= ds_mrr.dsmrr_info_const(keyno, seq, seq_init_param, n_ranges, + bufsz, flags, cost); + xp->g->Mrr= !(*flags & HA_MRR_USE_DEFAULT_IMPL); + return rows; +} // end of multi_range_read_info_const + +ha_rows ha_connect::multi_range_read_info(uint keyno, uint n_ranges, uint keys, + uint key_parts, uint *bufsz, + uint *flags, Cost_estimate *cost) +{ + ds_mrr.init(this, table); + + // MMR is implemented for "local" file based tables only + if (!IsFileType(GetRealType(GetTableOptionStruct()))) + *flags|= HA_MRR_USE_DEFAULT_IMPL; + + ha_rows rows= ds_mrr.dsmrr_info(keyno, n_ranges, keys, key_parts, bufsz, + flags, cost); + xp->g->Mrr= !(*flags & HA_MRR_USE_DEFAULT_IMPL); + return rows; +} // end of multi_range_read_info + + +int ha_connect::multi_range_read_explain_info(uint mrr_mode, char *str, + size_t size) +{ + return ds_mrr.dsmrr_explain_info(mrr_mode, str, size); +} // end of multi_range_read_explain_info + +/* CONNECT MRR implementation ends */ + +#if 0 +// Does this make sens for CONNECT? 
+Item *ha_connect::idx_cond_push(uint keyno_arg, Item* idx_cond_arg) +{ + pushed_idx_cond_keyno= keyno_arg; + pushed_idx_cond= idx_cond_arg; + in_range_check_pushed_down= TRUE; + if (active_index == pushed_idx_cond_keyno) + mi_set_index_cond_func(file, handler_index_cond_check, this); + return NULL; +} +#endif // 0 + struct st_mysql_storage_engine connect_storage_engine= { MYSQL_HANDLERTON_INTERFACE_VERSION }; @@ -5445,10 +6266,10 @@ maria_declare_plugin(connect) PLUGIN_LICENSE_GPL, connect_init_func, /* Plugin Init */ connect_done_func, /* Plugin Deinit */ - 0x0102, /* version number (1.02) */ + 0x0103, /* version number (1.03) */ NULL, /* status variables */ connect_system_variables, /* system variables */ - "1.02", /* string version */ + "1.03", /* string version */ MariaDB_PLUGIN_MATURITY_BETA /* maturity */ } maria_declare_plugin_end; diff --git a/storage/connect/ha_connect.h b/storage/connect/ha_connect.h index a8d0be4c03e..f12582b9b19 100644 --- a/storage/connect/ha_connect.h +++ b/storage/connect/ha_connect.h @@ -1,4 +1,4 @@ -/* Copyright (C) Olivier Bertrand 2004 - 2013 +/* Copyright (C) Olivier Bertrand 2004 - 2014 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -50,7 +50,7 @@ typedef struct _xinfo { class XCHK : public BLOCK { public: - XCHK(void) {oldsep= newsep= false; + XCHK(void) {oldsep= newsep= false; oldopn= newopn= NULL; oldpix= newpix= NULL;} @@ -71,7 +71,8 @@ public: typedef class XCHK *PCHK; typedef class user_connect *PCONNECT; typedef struct ha_table_option_struct TOS, *PTOS; -typedef struct ha_field_option_struct FOS, *PFOS; +typedef struct ha_field_option_struct FOS, *PFOS; +typedef struct ha_index_option_struct XOS, *PXOS; extern handlerton *connect_hton; @@ -122,12 +123,27 @@ struct ha_table_option_struct { struct ha_field_option_struct { ulonglong offset; + ulonglong freq; ulonglong fldlen; + uint opt; const char *dateformat; const char 
*fieldformat; char *special; }; +/* + index options can be declared similarly + using the ha_index_option_struct structure. + + Their values can be specified in the CREATE TABLE per index: + CREATE TABLE ( field ..., .., INDEX .... *here*, ... ) +*/ +struct ha_index_option_struct +{ + bool dynamic; + bool mapped; +}; + /** @brief CONNECT_SHARE is a structure that will be shared among all open handlers. This example implements the minimum of what you will probably need. @@ -166,32 +182,39 @@ public: static bool connect_init(void); static bool connect_end(void); TABTYPE GetRealType(PTOS pos= NULL); + char *GetRealString(const char *s); char *GetStringOption(char *opname, char *sdef= NULL); PTOS GetTableOptionStruct(TABLE_SHARE *s= NULL); bool GetBooleanOption(char *opname, bool bdef); bool SetBooleanOption(char *opname, bool b); int GetIntegerOption(char *opname); + bool GetIndexOption(KEY *kp, char *opname); bool CheckString(const char *str1, const char *str2); bool SameString(TABLE *tab, char *opn); bool SetIntegerOption(char *opname, int n); bool SameInt(TABLE *tab, char *opn); bool SameBool(TABLE *tab, char *opn); - bool FileExists(const char *fn); + bool FileExists(const char *fn, bool bf); bool NoFieldOptionChange(TABLE *tab); PFOS GetFieldOptionStruct(Field *fp); void *GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf); + PXOS GetIndexOptionStruct(KEY *kp); PIXDEF GetIndexInfo(TABLE_SHARE *s= NULL); const char *GetDBName(const char *name); const char *GetTableName(void); + char *GetPartName(void); //int GetColNameLen(Field *fp); //char *GetColName(Field *fp); //void AddColName(char *cp, Field *fp); TABLE *GetTable(void) {return table;} bool IsSameIndex(PIXDEF xp1, PIXDEF xp2); + bool IsPartitioned(void); + bool IsUnique(uint n); PTDB GetTDB(PGLOBAL g); int OpenTable(PGLOBAL g, bool del= false); - bool IsOpened(void); + bool CheckColumnList(PGLOBAL g); + bool IsOpened(void); int CloseTable(PGLOBAL g); int MakeRecord(char *buf); int ScanRecord(PGLOBAL g, 
uchar *buf); @@ -318,17 +341,19 @@ public: @note The pushed conditions form a stack (from which one can remove the last pushed condition using cond_pop). - The table handler filters out rows using (pushed_cond1 AND pushed_cond2 + The table handler filters out rows using (pushed_cond1 AND pushed_cond2 AND ... AND pushed_condN) or less restrictive condition, depending on handler's capabilities. handler->ha_reset() call empties the condition stack. Calls to rnd_init/rnd_end, index_init/index_end etc do not affect the condition stack. - */ + */ virtual const COND *cond_push(const COND *cond); PCFIL CheckCond(PGLOBAL g, PCFIL filp, AMT tty, Item *cond); const char *GetValStr(OPVAL vop, bool neg); +PFIL CondFilter(PGLOBAL g, Item *cond); +//PFIL CheckFilter(PGLOBAL g); /** Number of rows in table. It will only be called if @@ -336,7 +361,7 @@ const char *GetValStr(OPVAL vop, bool neg); */ virtual ha_rows records(); - /** + /** Type of table for caching query CONNECT should not use caching because its tables are external data prone to me modified out of MariaDB @@ -463,6 +488,28 @@ int index_prev(uchar *buf); enum thr_lock_type lock_type); ///< required int optimize(THD* thd, HA_CHECK_OPT* check_opt); + /** + * Multi Range Read interface + */ + int multi_range_read_init(RANGE_SEQ_IF *seq, void *seq_init_param, + uint n_ranges, uint mode, HANDLER_BUFFER *buf); + int multi_range_read_next(range_id_t *range_info); + ha_rows multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq, + void *seq_init_param, + uint n_ranges, uint *bufsz, + uint *flags, Cost_estimate *cost); + ha_rows multi_range_read_info(uint keyno, uint n_ranges, uint keys, + uint key_parts, uint *bufsz, + uint *flags, Cost_estimate *cost); + int multi_range_read_explain_info(uint mrr_mode, char *str, size_t size); + + int reset(void) {ds_mrr.dsmrr_close(); return 0;} + + /* Index condition pushdown implementation */ +// Item *idx_cond_push(uint keyno, Item* idx_cond); +private: + DsMrr_impl ds_mrr; + 
protected: bool check_privileges(THD *thd, PTOS options, char *dbn); MODE CheckMode(PGLOBAL g, THD *thd, MODE newmode, bool *chk, bool *cras); @@ -478,18 +525,22 @@ protected: PVAL sdvalin; // Used to convert date values PVAL sdvalout; // Used to convert date values bool istable; // True for table handler -//char tname[64]; // The table name + char partname[64]; // The partition name MODE xmod; // Table mode XINFO xinfo; // The table info structure bool valid_info; // True if xinfo is valid bool stop; // Used when creating index bool alter; // True when converting to other engine + bool mrr; // True when getting index positions + bool nox; // True when index should not be made + bool abort; // True after error in UPDATE/DELETE int indexing; // Type of indexing for CONNECT int locked; // Table lock + MY_BITMAP *part_id; // Columns used for partition func THR_LOCK_DATA lock_data; public: - TABLE_SHARE *tshp; // Used by called tables + TABLE_SHARE *tshp; // Used by called tables char *data_file_name; char *index_file_name; uint int_table_flags; // Inherited from MyISAM diff --git a/storage/connect/macutil.cpp b/storage/connect/macutil.cpp index 3069aa71cd6..4d3022b91b6 100644 --- a/storage/connect/macutil.cpp +++ b/storage/connect/macutil.cpp @@ -103,7 +103,7 @@ int MACINFO::GetNadap(PGLOBAL g) } // endif MaxSize return N; - } // end of GetMaxSize + } // end of GetNadap /***********************************************************************/ /* GetMacInfo: Get info for all found adapters. 
*/ diff --git a/storage/connect/mycat.cc b/storage/connect/mycat.cc index 10b12c0809b..ca09e877b1a 100644 --- a/storage/connect/mycat.cc +++ b/storage/connect/mycat.cc @@ -571,6 +571,7 @@ PTDB MYCAT::GetTable(PGLOBAL g, PTABLE tablep, MODE mode, LPCSTR type) tdbp->GetAmType()); tablep->SetTo_Tdb(tdbp); tdbp->SetTable(tablep); + tdbp->SetMode(mode); } // endif tdbp return (tdbp); diff --git a/storage/connect/myconn.cpp b/storage/connect/myconn.cpp index 8700a24ac96..0c4b50f1d0b 100644 --- a/storage/connect/myconn.cpp +++ b/storage/connect/myconn.cpp @@ -364,7 +364,7 @@ PQRYRES SrcColumns(PGLOBAL g, const char *host, const char *db, if (!port) port = mysqld_port; - if (!strnicmp(srcdef, "select ", 7)) { + if (!strnicmp(srcdef, "select ", 7)) { query = (char *)PlugSubAlloc(g, NULL, strlen(srcdef) + 9); strcat(strcpy(query, srcdef), " LIMIT 0"); } else @@ -686,19 +686,48 @@ int MYSQLC::ExecSQL(PGLOBAL g, const char *query, int *w) rc = RC_NF; } // endif field count -if (w) -//*w = mysql_warning_count(m_DB); - *w = m_DB->warning_count; + if (w) +// *w = mysql_warning_count(m_DB); + *w = m_DB->warning_count; return rc; } // end of ExecSQL /***********************************************************************/ +/* Get table size by executing "select count(*) from table_name". 
*/ +/***********************************************************************/ +int MYSQLC::GetTableSize(PGLOBAL g, PSZ query) + { + if (mysql_real_query(m_DB, query, strlen(query))) { +#if defined(_DEBUG) + char *msg = (char*)PlugSubAlloc(g, NULL, 512 + strlen(query)); + + sprintf(msg, "(%d) %s [%s]", mysql_errno(m_DB), + mysql_error(m_DB), query); + strncpy(g->Message, msg, sizeof(g->Message) - 1); + g->Message[sizeof(g->Message) - 1] = 0; +#endif // _DEBUG + return -2; + } // endif mysql_real_query + + if (!(m_Res = mysql_store_result(m_DB))) + return -3; + + // Get the resulting count value + m_Rows = (int)mysql_num_rows(m_Res); // Should be 1 + + if (m_Rows && (m_Row = mysql_fetch_row(m_Res))) + return atoi(*m_Row); + + return -4; + } // end of GetTableSize + +/***********************************************************************/ /* Move to a specific row and column */ /***********************************************************************/ void MYSQLC::DataSeek(my_ulonglong row) { - MYSQL_ROWS *tmp=0; + MYSQL_ROWS *tmp = 0; //DBUG_PRINT("info",("mysql_data_seek(%ld)",(long) row)); if (m_Res->data) @@ -873,7 +902,7 @@ PQRYRES MYSQLC::GetResult(PGLOBAL g, bool pdb) else { if (!*row && crp->Nulls) crp->Nulls[n] = '*'; // Null value - + crp->Kdata->Reset(n); } // endelse *row } @@ -970,7 +999,7 @@ void MYSQLC::DiscardResults(void) while (!mysql_next_result(m_DB)) { res = mysql_store_result(m_DB); mysql_free_result(res); - } // endwhile next result + } // endwhile next result } // end of DiscardResults #endif // 0 diff --git a/storage/connect/myconn.h b/storage/connect/myconn.h index 7e892eece34..65e6531aee4 100644 --- a/storage/connect/myconn.h +++ b/storage/connect/myconn.h @@ -64,6 +64,7 @@ class DllItem MYSQLC { // Methods int GetResultSize(PGLOBAL g, PSZ sql); + int GetTableSize(PGLOBAL g, PSZ query); int Open(PGLOBAL g, const char *host, const char *db, const char *user= "root", const char *pwd= "*", int pt= 0); diff --git 
a/storage/connect/mysql-test/connect/r/alter.result b/storage/connect/mysql-test/connect/r/alter.result index ccfae3f4ddb..77d775220ec 100644 --- a/storage/connect/mysql-test/connect/r/alter.result +++ b/storage/connect/mysql-test/connect/r/alter.result @@ -21,8 +21,8 @@ DROP INDEX xd ON t1; ALTER TABLE t1 ADD INDEX xc (c), ADD INDEX xd (d); SHOW INDEX FROM t1; Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment -t1 1 xc 1 c A NULL NULL NULL XPLUG -t1 1 xd 1 d A NULL NULL NULL XPLUG +t1 1 xc 1 c A NULL NULL NULL XINDEX +t1 1 xd 1 d A NULL NULL NULL XINDEX ALTER TABLE t1 DROP INDEX xc, DROP INDEX xd; SHOW INDEX FROM t1; Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment @@ -111,8 +111,8 @@ t1 CREATE TABLE `t1` ( ) ENGINE=CONNECT DEFAULT CHARSET=latin1 `TABLE_TYPE`=DBF SHOW INDEX FROM t1; Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment -t1 1 xc 1 c A NULL NULL NULL XPLUG -t1 1 xd 1 d A NULL NULL NULL XPLUG +t1 1 xc 1 c A NULL NULL NULL XINDEX +t1 1 xd 1 d A NULL NULL NULL XINDEX SELECT * FROM t1; c d 1 One @@ -143,8 +143,8 @@ line ALTER TABLE t1 ADD INDEX xc (c), ADD INDEX xd (d); SHOW INDEX FROM t1; Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment -t1 1 xc 1 c A NULL NULL NULL XPLUG -t1 1 xd 1 d A NULL NULL NULL XPLUG +t1 1 xc 1 c A NULL NULL NULL XINDEX +t1 1 xd 1 d A NULL NULL NULL XINDEX SELECT d FROM t1 WHERE c = 2; d Two @@ -218,13 +218,22 @@ Three 3 # Changing to another engine is Ok # However, the data file is not deleted. 
# -ALTER TABLE t1 ENGINE=MARIA; +ALTER TABLE t1 ENGINE=ARIA; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `d` char(10) NOT NULL /* `FLAG`=11 */, + `c` int(11) NOT NULL /* `FLAG`=0 */ +) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1 /* `TABLE_TYPE`=fix `FILE_NAME`='tf1.txt' `ENDING`=1 */ +set @old_sql_mode=@@sql_mode; +set sql_mode=ignore_bad_table_options; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `d` char(10) NOT NULL `FLAG`=11, `c` int(11) NOT NULL `FLAG`=0 ) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1 `TABLE_TYPE`=fix `FILE_NAME`='tf1.txt' `ENDING`=1 +set sql_mode=@old_sql_mode; SELECT * from t1; d c One 1 @@ -240,7 +249,7 @@ line # Sure enough, the data file was not deleted. # ALTER TABLE t1 ENGINE=CONNECT; -ERROR HY000: Operation denied. Table data would be lost. +ERROR HY000: Operation denied. Table data would be modified. # # But changing back to CONNECT succeed # if the data file does not exist. diff --git a/storage/connect/mysql-test/connect/r/alter_xml.result b/storage/connect/mysql-test/connect/r/alter_xml.result index bd3b281b05b..f2250b78d2d 100644 --- a/storage/connect/mysql-test/connect/r/alter_xml.result +++ b/storage/connect/mysql-test/connect/r/alter_xml.result @@ -35,7 +35,7 @@ Warning 1105 No table_type. 
Will be set to DOS SELECT * FROM t2; line <?xml version="1.0" encoding="UTF-8"?> -<!-- Created by CONNECT Version 1.02.0002 March 16, 2014 --> +<!-- Created by the MariaDB CONNECT Storage Engine--> <t1> <row> <TH>c</TH> @@ -71,7 +71,7 @@ t1 CREATE TABLE `t1` ( SELECT * FROM t2; line <?xml version="1.0" encoding="UTF-8"?> -<!-- Created by CONNECT Version 1.02.0002 March 16, 2014 --> +<!-- Created by the MariaDB CONNECT Storage Engine--> <t1> <row d="One"> <c>1</c> diff --git a/storage/connect/mysql-test/connect/r/mysql.result b/storage/connect/mysql-test/connect/r/mysql.result index fc2fe2418cf..29f077c3d9f 100644 --- a/storage/connect/mysql-test/connect/r/mysql.result +++ b/storage/connect/mysql-test/connect/r/mysql.result @@ -282,8 +282,6 @@ a 20 30 ALTER TABLE t2 MODIFY a TINYINT; -Warnings: -Warning 1105 This is an outward table, table data were not modified. SHOW CREATE TABLE t2; Table Create Table t2 CREATE TABLE `t2` ( diff --git a/storage/connect/mysql-test/connect/r/occur.result b/storage/connect/mysql-test/connect/r/occur.result index fbcda4660be..a497dfc9942 100644 --- a/storage/connect/mysql-test/connect/r/occur.result +++ b/storage/connect/mysql-test/connect/r/occur.result @@ -193,8 +193,6 @@ Kevin 8 Lisbeth 2 Mary 2 ALTER TABLE xpet MODIFY number INT NOT NULL; -Warnings: -Warning 1105 This is an outward table, table data were not modified. 
SELECT * FROM xpet; name race number John dog 2 diff --git a/storage/connect/mysql-test/connect/r/part_file.result b/storage/connect/mysql-test/connect/r/part_file.result new file mode 100644 index 00000000000..bd5c258a4e2 --- /dev/null +++ b/storage/connect/mysql-test/connect/r/part_file.result @@ -0,0 +1,344 @@ +# This will be used to see what data files are created +CREATE TABLE dr1 ( +fname VARCHAR(256) NOT NULL FLAG=2, +ftype CHAR(8) NOT NULL FLAG=3 +# ,FSIZE INT(6) NOT NULL FLAG=5 removed because Unix size != Windows size +) engine=CONNECT table_type=DIR file_name='t1#P#*.*'; +# +# Testing partitioning on inward table +# +CREATE TABLE t1 ( +id INT NOT NULL, +msg VARCHAR(32) +) ENGINE=CONNECT TABLE_TYPE=CSV AVG_ROW_LENGTH=10 +PARTITION BY RANGE(id) ( +PARTITION first VALUES LESS THAN(10), +PARTITION middle VALUES LESS THAN(50), +PARTITION last VALUES LESS THAN(MAXVALUE)); +INSERT INTO t1 VALUES(4, 'four'),(24, 'twenty four'); +INSERT INTO t1 VALUES(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one'); +SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1'; +partition_name table_rows +first 2 +middle 3 +last 2 +SELECT * FROM t1; +id msg +4 four +7 seven +24 twenty four +10 ten +40 forty +60 sixty +81 eighty one +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id > 50; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 last ALL NULL NULL NULL NULL 3 Using where +SELECT * FROM t1 WHERE id > 50; +id msg +60 sixty +81 eighty one +UPDATE t1 set id = 41 WHERE msg = 'four'; +ERROR HY000: Got error 174 'Cannot update column id because it is used for partitioning' from CONNECT +UPDATE t1 set msg = 'quatre' WHERE id = 4; +SELECT * FROM dr1 ORDER BY fname, ftype; +fname ftype +t1#P#first .csv +t1#P#last .csv +t1#P#middle .csv +# +# Altering partitioning on inward table +# +ALTER TABLE t1 +PARTITION by range(id) ( +PARTITION first VALUES LESS THAN(11), +PARTITION middle VALUES LESS 
THAN(50), +PARTITION last VALUES LESS THAN(MAXVALUE)); +SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1'; +partition_name table_rows +first 3 +middle 2 +last 2 +SELECT * FROM dr1 ORDER BY fname, ftype; +fname ftype +t1#P#first .csv +t1#P#last .csv +t1#P#middle .csv +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id=10; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 first ALL NULL NULL NULL NULL 3 Using where +SELECT * FROM t1 WHERE id=10; +id msg +10 ten +DELETE FROM t1 WHERE id in (4,60); +SELECT * FROM t1; +id msg +7 seven +10 ten +24 twenty four +40 forty +81 eighty one +DROP TABLE t1; +# +# Testing partitioning on a void outward table +# +ALTER TABLE dr1 FILE_NAME='part*.*'; +CREATE TABLE t1 ( +rwid INT(6) DEFAULT 0 SPECIAL=ROWID, +rnum INT(6) DEFAULT 0 SPECIAL=ROWNUM, +prtn VARCHAR(64) DEFAULT '' SPECIAL=PARTID, +tbn VARCHAR(64) DEFAULT '' SPECIAL=TABID, +fid VARCHAR(256) DEFAULT '' SPECIAL=FNAME, +id INT KEY NOT NULL, +msg VARCHAR(32) +) ENGINE=CONNECT TABLE_TYPE=FIX FILE_NAME='part%s.txt'; +ALTER TABLE t1 +PARTITION by range columns(id) ( +PARTITION `1` VALUES LESS THAN(10), +PARTITION `2` VALUES LESS THAN(50), +PARTITION `3` VALUES LESS THAN(MAXVALUE)); +SHOW INDEX FROM t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 id A NULL NULL NULL XINDEX +INSERT INTO t1(id,msg) VALUES(4, 'four'); +SELECT * FROM dr1 ORDER BY fname, ftype; +fname ftype +part1 .fnx +part1 .txt +INSERT INTO t1(id,msg) VALUES(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one'); +INSERT INTO t1(id,msg) VALUES(72,'seventy two'),(20,'twenty'),(1,'one'),(35,'thirty five'),(8,'eight'); +SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1'; +partition_name table_rows +1 4 +2 4 +3 3 +SELECT * FROM t1; +rwid rnum prtn tbn fid id msg +1 1 1 t1 part1 4 four +2 2 1 
t1 part1 7 seven +3 3 1 t1 part1 1 one +4 4 1 t1 part1 8 eight +1 1 2 t1 part2 10 ten +2 2 2 t1 part2 40 forty +3 3 2 t1 part2 20 twenty +4 4 2 t1 part2 35 thirty five +1 1 3 t1 part3 60 sixty +2 2 3 t1 part3 81 eighty one +3 3 3 t1 part3 72 seventy two +SELECT * FROM t1 order by id; +rwid rnum prtn tbn fid id msg +3 3 1 t1 part1 1 one +1 1 1 t1 part1 4 four +2 2 1 t1 part1 7 seven +4 4 1 t1 part1 8 eight +1 1 2 t1 part2 10 ten +3 3 2 t1 part2 20 twenty +4 4 2 t1 part2 35 thirty five +2 2 2 t1 part2 40 forty +1 1 3 t1 part3 60 sixty +3 3 3 t1 part3 72 seventy two +2 2 3 t1 part3 81 eighty one +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id = 10; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 2 const PRIMARY PRIMARY 4 const 1 +SELECT * FROM t1 WHERE id = 10; +rwid rnum prtn tbn fid id msg +1 1 2 t1 part2 10 ten +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id >= 10; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 2,3 range PRIMARY PRIMARY 4 NULL 7 Using where +SELECT * FROM t1 WHERE id >= 10; +rwid rnum prtn tbn fid id msg +1 1 2 t1 part2 10 ten +3 3 2 t1 part2 20 twenty +4 4 2 t1 part2 35 thirty five +2 2 2 t1 part2 40 forty +1 1 3 t1 part3 60 sixty +3 3 3 t1 part3 72 seventy two +2 2 3 t1 part3 81 eighty one +SELECT count(*) FROM t1 WHERE id < 10; +count(*) +4 +SELECT case when id < 10 then 1 when id < 50 then 2 else 3 end as pn, count(*) FROM t1 group by pn; +pn count(*) +1 4 +2 4 +3 3 +SELECT prtn, count(*) FROM t1 group by prtn; +prtn count(*) +1 4 +2 4 +3 3 +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id > 50; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 3 range PRIMARY PRIMARY 4 NULL 3 Using where +SELECT * FROM t1 WHERE id = 35; +rwid rnum prtn tbn fid id msg +4 4 2 t1 part2 35 thirty five +SELECT * FROM dr1 ORDER BY fname, ftype; +fname ftype +part1 .fnx +part1 .txt +part2 .fnx +part2 .txt +part3 .fnx +part3 .txt +# This does not 
change the partition file data and is WRONG +ALTER TABLE t1 +PARTITION by range columns(id) ( +PARTITION `1` VALUES LESS THAN(11), +PARTITION `2` VALUES LESS THAN(70), +PARTITION `3` VALUES LESS THAN(MAXVALUE)); +Warnings: +Warning 1105 Data repartition in 1 is unchecked +Warning 1105 Data repartition in 2 is unchecked +Warning 1105 Data repartition in 3 is unchecked +SELECT CASE WHEN id < 11 THEN 1 WHEN id < 70 THEN 2 ELSE 3 END AS pn, COUNT(*) FROM t1 GROUP BY pn; +pn COUNT(*) +1 5 +2 4 +3 2 +SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1'; +partition_name table_rows +1 4 +2 4 +3 3 +SELECT * FROM dr1 ORDER BY fname, ftype; +fname ftype +part1 .fnx +part1 .txt +part2 .fnx +part2 .txt +part3 .fnx +part3 .txt +# +# This is the correct way to change partitioning: +# Save table values, erase the table, then re-insert saved values in modified table +# +CREATE TABLE t2 ( +id INT NOT NULL, +msg VARCHAR(32) +) ENGINE=CONNECT TABLE_TYPE=FIX; +Warnings: +Warning 1105 No file name. 
Table will use t2.fix +INSERT INTO t2 SELECT id, msg FROM t1; +DELETE FROM t1; +INSERT INTO t1(id,msg) SELECT * FROM t2; +SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1'; +partition_name table_rows +1 5 +2 4 +3 2 +SELECT * FROM t1; +rwid rnum prtn tbn fid id msg +1 1 1 t1 part1 4 four +2 2 1 t1 part1 7 seven +3 3 1 t1 part1 1 one +4 4 1 t1 part1 8 eight +5 5 1 t1 part1 10 ten +1 1 2 t1 part2 40 forty +2 2 2 t1 part2 20 twenty +3 3 2 t1 part2 35 thirty five +4 4 2 t1 part2 60 sixty +1 1 3 t1 part3 81 eighty one +2 2 3 t1 part3 72 seventy two +SELECT * FROM dr1 ORDER BY fname, ftype; +fname ftype +part1 .fnx +part1 .txt +part2 .fnx +part2 .txt +part3 .fnx +part3 .txt +DROP TABLE t2; +DROP TABLE t1; +# +# Testing partitioning on a populated outward table +# +CREATE TABLE t1 ( +id INT NOT NULL, +msg VARCHAR(32) +) ENGINE=CONNECT TABLE_TYPE=FIX FILE_NAME='part%s.txt' +PARTITION by range columns(id) ( +PARTITION `1` VALUES LESS THAN(11), +PARTITION `2` VALUES LESS THAN(70), +PARTITION `3` VALUES LESS THAN(MAXVALUE)); +Warnings: +Warning 1105 Data repartition in 1 is unchecked +Warning 1105 Data repartition in 2 is unchecked +Warning 1105 Data repartition in 3 is unchecked +SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1'; +partition_name table_rows +1 5 +2 4 +3 2 +SELECT * FROM t1 WHERE id < 11; +id msg +4 four +7 seven +1 one +8 eight +10 ten +SELECT * FROM t1 WHERE id >= 70; +id msg +81 eighty one +72 seventy two +SELECT * FROM dr1 ORDER BY fname, ftype; +fname ftype +part1 .fnx +part1 .txt +part2 .fnx +part2 .txt +part3 .fnx +part3 .txt +# +# Testing indexing on a partitioned table +# +CREATE INDEX XID ON t1(id); +SHOW INDEX FROM t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 1 XID 1 id A NULL NULL NULL XINDEX +SELECT * FROM dr1 ORDER BY fname, ftype; +fname ftype +part1 .fnx +part1 .txt 
+part2 .fnx +part2 .txt +part3 .fnx +part3 .txt +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id = 10; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 1 ref XID XID 4 const 1 +DROP INDEX XID ON t1; +SHOW INDEX FROM t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +SELECT * FROM dr1 ORDER BY fname, ftype; +fname ftype +part1 .txt +part2 .txt +part3 .txt +ALTER TABLE t1 ADD PRIMARY KEY (id); +SHOW INDEX FROM t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 id A NULL NULL NULL XINDEX +SELECT * FROM dr1 ORDER BY fname, ftype; +fname ftype +part1 .fnx +part1 .txt +part2 .fnx +part2 .txt +part3 .fnx +part3 .txt +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id = 10; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 1 const PRIMARY PRIMARY 4 const 1 +ALTER TABLE t1 DROP PRIMARY KEY; +SHOW INDEX FROM t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +SELECT * FROM dr1 ORDER BY fname, ftype; +fname ftype +part1 .txt +part2 .txt +part3 .txt +DROP TABLE t1; +DROP TABLE dr1; diff --git a/storage/connect/mysql-test/connect/r/part_table.result b/storage/connect/mysql-test/connect/r/part_table.result new file mode 100644 index 00000000000..09d46687f00 --- /dev/null +++ b/storage/connect/mysql-test/connect/r/part_table.result @@ -0,0 +1,195 @@ +CREATE TABLE xt1 ( +id INT KEY NOT NULL, +msg VARCHAR(32)) +ENGINE=MyISAM; +INSERT INTO xt1 VALUES(4, 'four'),(7,'seven'),(1,'one'),(8,'eight'); +SELECT * FROM xt1; +id msg +4 four +7 seven +1 one +8 eight +CREATE TABLE xt2 ( +id INT KEY NOT NULL, +msg VARCHAR(32)); +INSERT INTO xt2 VALUES(10,'ten'),(40,'forty'),(11,'eleven'),(35,'thirty five'); +SELECT * FROM xt2; +id msg +10 ten +40 forty +11 eleven 
+35 thirty five +CREATE TABLE xt3 ( +id INT KEY NOT NULL, +msg VARCHAR(32)) +ENGINE=CONNECT TABLE_TYPE=CSV AVG_ROW_LENGTH=10; +Warnings: +Warning 1105 No file name. Table will use xt3.csv +INSERT INTO xt3 VALUES(60,'sixty'),(81,'eighty one'),(72,'seventy two'); +SELECT * FROM xt3; +id msg +60 sixty +81 eighty one +72 seventy two +CREATE TABLE t1 ( +id INT NOT NULL, +msg VARCHAR(32)) +ENGINE=CONNECT TABLE_TYPE=PROXY TABNAME='xt%s' +PARTITION BY RANGE COLUMNS(id) ( +PARTITION `1` VALUES LESS THAN(10), +PARTITION `2` VALUES LESS THAN(50), +PARTITION `3` VALUES LESS THAN(MAXVALUE)); +Warnings: +Warning 1105 Data repartition in 1 is unchecked +Warning 1105 Data repartition in 2 is unchecked +Warning 1105 Data repartition in 3 is unchecked +SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1'; +partition_name table_rows +1 4 +2 4 +3 3 +SELECT * FROM t1; +id msg +4 four +7 seven +1 one +8 eight +10 ten +40 forty +11 eleven +35 thirty five +60 sixty +81 eighty one +72 seventy two +DELETE FROM t1; +Warnings: +Note 1105 xt1: 4 affected rows +Note 1105 xt2: 4 affected rows +ALTER TABLE t1 ADD INDEX XID(id); +ERROR HY000: Table type PROXY is not indexable +INSERT INTO t1 VALUES(4, 'four'); +INSERT INTO t1 VALUES(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one'); +INSERT INTO t1 VALUES(72,'seventy two'),(11,'eleven'),(1,'one'),(35,'thirty five'),(8,'eight'); +SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1'; +partition_name table_rows +1 4 +2 4 +3 3 +SELECT * FROM t1; +id msg +4 four +7 seven +1 one +8 eight +10 ten +40 forty +11 eleven +35 thirty five +60 sixty +81 eighty one +72 seventy two +EXPLAIN PARTITIONS +SELECT * FROM t1 WHERE id = 81; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 3 ALL NULL NULL NULL NULL 4 Using where +DELETE FROM t1; +Warnings: +Note 1105 xt1: 4 affected rows +Note 1105 xt2: 4 affected rows +DROP TABLE t1; 
+CREATE TABLE t1 ( +id INT KEY NOT NULL, +msg VARCHAR(32)) +ENGINE=CONNECT TABLE_TYPE=MYSQL TABNAME='xt%s' +PARTITION BY RANGE COLUMNS(id) ( +PARTITION `1` VALUES LESS THAN(10), +PARTITION `2` VALUES LESS THAN(50), +PARTITION `3` VALUES LESS THAN(MAXVALUE)); +Warnings: +Warning 1105 Data repartition in 1 is unchecked +Warning 1105 Data repartition in 2 is unchecked +Warning 1105 Data repartition in 3 is unchecked +SHOW INDEX FROM t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 id NULL NULL NULL NULL REMOTE +INSERT INTO t1 VALUES(4, 'four'); +INSERT INTO t1 VALUES(40, 'forty'); +INSERT INTO t1 VALUES(72,'seventy two'); +INSERT INTO t1 VALUES(7,'seven'),(10,'ten'),(60,'sixty'),(81,'eighty one'),(11,'eleven'),(1,'one'),(35,'thirty five'),(8,'eight'); +SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1'; +partition_name table_rows +1 4 +2 4 +3 3 +SELECT * FROM t1; +id msg +4 four +7 seven +1 one +8 eight +40 forty +10 ten +11 eleven +35 thirty five +72 seventy two +60 sixty +81 eighty one +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id = 81; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 3 const PRIMARY PRIMARY 4 const 1 +SELECT * FROM t1 WHERE id = 7; +id msg +7 seven +SELECT * FROM t1 WHERE id = 35; +id msg +35 thirty five +UPDATE t1 SET msg = 'number' WHERE id in (60,72); +Warnings: +Note 1105 xt3: 2 affected rows +Note 1105 xt3: 0 affected rows +UPDATE t1 SET msg = 'soixante' WHERE id = 60; +Warnings: +Note 1105 xt3: 1 affected rows +SELECT * FROM t1 WHERE id > 50; +id msg +60 soixante +72 number +81 eighty one +UPDATE t1 SET msg = 'big' WHERE id > 50; +Warnings: +Note 1105 xt3: 3 affected rows +UPDATE t1 SET msg = 'sept' WHERE id = 7; +Warnings: +Note 1105 xt1: 1 affected rows +SELECT * FROM t1; +id msg +4 four +7 sept +1 one +8 eight +40 forty +10 ten +11 eleven +35 thirty five +72 
big +60 big +81 big +DELETE FROM t1 WHERE id in (60,72); +Warnings: +Note 1105 xt3: 2 affected rows +Note 1105 xt3: 0 affected rows +SELECT * FROM t1; +id msg +4 four +7 sept +1 one +8 eight +40 forty +10 ten +11 eleven +35 thirty five +81 big +DROP TABLE t1; +DROP TABLE xt1; +DROP TABLE xt2; +DROP TABLE xt3; diff --git a/storage/connect/mysql-test/connect/r/pivot.result b/storage/connect/mysql-test/connect/r/pivot.result index 4b39a21d3d9..349db89fa35 100644 --- a/storage/connect/mysql-test/connect/r/pivot.result +++ b/storage/connect/mysql-test/connect/r/pivot.result @@ -59,8 +59,6 @@ Joe 5 14.00 0.00 12.00 # Restricting the columns in a Pivot Table # ALTER TABLE pivex DROP COLUMN week; -Warnings: -Warning 1105 This is an outward table, table data were not modified. SELECT * FROM pivex; Who Beer Car Food Beth 51.00 0.00 29.00 diff --git a/storage/connect/mysql-test/connect/r/xml.result b/storage/connect/mysql-test/connect/r/xml.result index 5018eec47fc..eea53bf55c7 100644 --- a/storage/connect/mysql-test/connect/r/xml.result +++ b/storage/connect/mysql-test/connect/r/xml.result @@ -416,7 +416,7 @@ DROP TABLE t1; SET @a=LOAD_FILE('MYSQLD_DATADIR/test/t1.xml'); SELECT CAST(@a AS CHAR CHARACTER SET latin1); CAST(@a AS CHAR CHARACTER SET latin1) <?xml version="1.0" encoding="iso-8859-1"?> -<!-- Created by CONNECT Version 1.02.0002 March 16, 2014 --> +<!-- Created by the MariaDB CONNECT Storage Engine--> <t1> <line> <node>ÀÁÂÃ</node> diff --git a/storage/connect/mysql-test/connect/t/alter.test b/storage/connect/mysql-test/connect/t/alter.test index 299381b925a..49f34996bbd 100644 --- a/storage/connect/mysql-test/connect/t/alter.test +++ b/storage/connect/mysql-test/connect/t/alter.test @@ -105,8 +105,12 @@ SELECT * FROM t1; --echo # Changing to another engine is Ok
--echo # However, the data file is not deleted.
--echo #
-ALTER TABLE t1 ENGINE=MARIA;
+ALTER TABLE t1 ENGINE=ARIA;
SHOW CREATE TABLE t1;
+set @old_sql_mode=@@sql_mode;
+set sql_mode=ignore_bad_table_options;
+SHOW CREATE TABLE t1;
+set sql_mode=@old_sql_mode;
SELECT * from t1;
SELECT * from t2;
diff --git a/storage/connect/mysql-test/connect/t/part_file.test b/storage/connect/mysql-test/connect/t/part_file.test new file mode 100644 index 00000000000..159908b6d9b --- /dev/null +++ b/storage/connect/mysql-test/connect/t/part_file.test @@ -0,0 +1,162 @@ +--source include/have_partition.inc
+let $MYSQLD_DATADIR= `select @@datadir`;
+
+--echo # This will be used to see what data files are created
+CREATE TABLE dr1 (
+ fname VARCHAR(256) NOT NULL FLAG=2,
+ ftype CHAR(8) NOT NULL FLAG=3
+# ,FSIZE INT(6) NOT NULL FLAG=5 removed because Unix size != Windows size
+) engine=CONNECT table_type=DIR file_name='t1#P#*.*';
+
+--echo #
+--echo # Testing partitioning on inward table
+--echo #
+CREATE TABLE t1 (
+ id INT NOT NULL,
+ msg VARCHAR(32)
+) ENGINE=CONNECT TABLE_TYPE=CSV AVG_ROW_LENGTH=10
+PARTITION BY RANGE(id) (
+PARTITION first VALUES LESS THAN(10),
+PARTITION middle VALUES LESS THAN(50),
+PARTITION last VALUES LESS THAN(MAXVALUE));
+INSERT INTO t1 VALUES(4, 'four'),(24, 'twenty four');
+INSERT INTO t1 VALUES(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one');
+SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1';
+SELECT * FROM t1;
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id > 50;
+SELECT * FROM t1 WHERE id > 50;
+#TODO: Differences between Linux and Windows
+#SHOW TABLE STATUS LIKE 't1';
+--error ER_GET_ERRMSG
+UPDATE t1 set id = 41 WHERE msg = 'four';
+UPDATE t1 set msg = 'quatre' WHERE id = 4;
+SELECT * FROM dr1 ORDER BY fname, ftype;
+--echo #
+--echo # Altering partitioning on inward table
+--echo #
+ALTER TABLE t1
+PARTITION by range(id) (
+PARTITION first VALUES LESS THAN(11),
+PARTITION middle VALUES LESS THAN(50),
+PARTITION last VALUES LESS THAN(MAXVALUE));
+SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1';
+SELECT * FROM dr1 ORDER BY fname, ftype;
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id=10;
+SELECT * FROM t1 WHERE id=10;
+DELETE FROM t1 WHERE id in (4,60);
+SELECT * FROM t1;
+DROP TABLE t1;
+# TODO: this fails on Linux
+#SELECT * FROM dr1;
+
+--echo #
+--echo # Testing partitioning on a void outward table
+--echo #
+ALTER TABLE dr1 FILE_NAME='part*.*';
+CREATE TABLE t1 (
+ rwid INT(6) DEFAULT 0 SPECIAL=ROWID,
+ rnum INT(6) DEFAULT 0 SPECIAL=ROWNUM,
+ prtn VARCHAR(64) DEFAULT '' SPECIAL=PARTID,
+ tbn VARCHAR(64) DEFAULT '' SPECIAL=TABID,
+ fid VARCHAR(256) DEFAULT '' SPECIAL=FNAME,
+ id INT KEY NOT NULL,
+ msg VARCHAR(32)
+) ENGINE=CONNECT TABLE_TYPE=FIX FILE_NAME='part%s.txt';
+--replace_result $MYSQLD_DATADIR "DATADIR/"
+ALTER TABLE t1
+PARTITION by range columns(id) (
+PARTITION `1` VALUES LESS THAN(10),
+PARTITION `2` VALUES LESS THAN(50),
+PARTITION `3` VALUES LESS THAN(MAXVALUE));
+SHOW INDEX FROM t1;
+# TODO: this fails on Linux
+#SELECT * FROM dr1 ORDER BY fname, ftype;
+INSERT INTO t1(id,msg) VALUES(4, 'four');
+SELECT * FROM dr1 ORDER BY fname, ftype;
+INSERT INTO t1(id,msg) VALUES(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one');
+INSERT INTO t1(id,msg) VALUES(72,'seventy two'),(20,'twenty'),(1,'one'),(35,'thirty five'),(8,'eight');
+SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1';
+SELECT * FROM t1;
+SELECT * FROM t1 order by id;
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id = 10;
+SELECT * FROM t1 WHERE id = 10;
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id >= 10;
+SELECT * FROM t1 WHERE id >= 10;
+SELECT count(*) FROM t1 WHERE id < 10;
+SELECT case when id < 10 then 1 when id < 50 then 2 else 3 end as pn, count(*) FROM t1 group by pn;
+SELECT prtn, count(*) FROM t1 group by prtn;
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id > 50;
+SELECT * FROM t1 WHERE id = 35;
+SELECT * FROM dr1 ORDER BY fname, ftype;
+--echo # This does not change the partition file data and is WRONG
+ALTER TABLE t1
+PARTITION by range columns(id) (
+PARTITION `1` VALUES LESS THAN(11),
+PARTITION `2` VALUES LESS THAN(70),
+PARTITION `3` VALUES LESS THAN(MAXVALUE));
+SELECT CASE WHEN id < 11 THEN 1 WHEN id < 70 THEN 2 ELSE 3 END AS pn, COUNT(*) FROM t1 GROUP BY pn;
+SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1';
+SELECT * FROM dr1 ORDER BY fname, ftype;
+--echo #
+--echo # This is the correct way to change partitioning:
+--echo # Save table values, erase the table, then re-insert saved values in modified table
+--echo #
+CREATE TABLE t2 (
+ id INT NOT NULL,
+ msg VARCHAR(32)
+) ENGINE=CONNECT TABLE_TYPE=FIX;
+INSERT INTO t2 SELECT id, msg FROM t1;
+DELETE FROM t1;
+INSERT INTO t1(id,msg) SELECT * FROM t2;
+SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1';
+SELECT * FROM t1;
+SELECT * FROM dr1 ORDER BY fname, ftype;
+DROP TABLE t2;
+DROP TABLE t1;
+
+--echo #
+--echo # Testing partitioning on a populated outward table
+--echo #
+CREATE TABLE t1 (
+ id INT NOT NULL,
+ msg VARCHAR(32)
+) ENGINE=CONNECT TABLE_TYPE=FIX FILE_NAME='part%s.txt'
+PARTITION by range columns(id) (
+PARTITION `1` VALUES LESS THAN(11),
+PARTITION `2` VALUES LESS THAN(70),
+PARTITION `3` VALUES LESS THAN(MAXVALUE));
+SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1';
+SELECT * FROM t1 WHERE id < 11;
+SELECT * FROM t1 WHERE id >= 70;
+SELECT * FROM dr1 ORDER BY fname, ftype;
+
+--echo #
+--echo # Testing indexing on a partitioned table
+--echo #
+CREATE INDEX XID ON t1(id);
+SHOW INDEX FROM t1;
+SELECT * FROM dr1 ORDER BY fname, ftype;
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id = 10;
+DROP INDEX XID ON t1;
+SHOW INDEX FROM t1;
+SELECT * FROM dr1 ORDER BY fname, ftype;
+ALTER TABLE t1 ADD PRIMARY KEY (id);
+SHOW INDEX FROM t1;
+SELECT * FROM dr1 ORDER BY fname, ftype;
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id = 10;
+ALTER TABLE t1 DROP PRIMARY KEY;
+SHOW INDEX FROM t1;
+SELECT * FROM dr1 ORDER BY fname, ftype;
+DROP TABLE t1;
+DROP TABLE dr1;
+
+#
+# Clean up
+#
+--remove_file $MYSQLD_DATADIR/test/part1.txt
+--remove_file $MYSQLD_DATADIR/test/part2.txt
+--remove_file $MYSQLD_DATADIR/test/part3.txt
+#--remove_file $MYSQLD_DATADIR/test/part%s.fnx
+#--remove_file $MYSQLD_DATADIR/test/part1.fnx
+#--remove_file $MYSQLD_DATADIR/test/part2.fnx
+#--remove_file $MYSQLD_DATADIR/test/part3.fnx
diff --git a/storage/connect/mysql-test/connect/t/part_table.test b/storage/connect/mysql-test/connect/t/part_table.test new file mode 100644 index 00000000000..34e224efa6f --- /dev/null +++ b/storage/connect/mysql-test/connect/t/part_table.test @@ -0,0 +1,85 @@ +--source include/not_embedded.inc
+--source include/have_partition.inc
+
+#
+# These will be used by the t1 table partition table
+#
+CREATE TABLE xt1 (
+id INT KEY NOT NULL,
+msg VARCHAR(32))
+ENGINE=MyISAM;
+INSERT INTO xt1 VALUES(4, 'four'),(7,'seven'),(1,'one'),(8,'eight');
+SELECT * FROM xt1;
+
+CREATE TABLE xt2 (
+id INT KEY NOT NULL,
+msg VARCHAR(32));
+INSERT INTO xt2 VALUES(10,'ten'),(40,'forty'),(11,'eleven'),(35,'thirty five');
+SELECT * FROM xt2;
+
+CREATE TABLE xt3 (
+id INT KEY NOT NULL,
+msg VARCHAR(32))
+ENGINE=CONNECT TABLE_TYPE=CSV AVG_ROW_LENGTH=10;
+INSERT INTO xt3 VALUES(60,'sixty'),(81,'eighty one'),(72,'seventy two');
+SELECT * FROM xt3;
+
+#
+# Based on PROXY the table is not indexable
+#
+CREATE TABLE t1 (
+id INT NOT NULL,
+msg VARCHAR(32))
+ENGINE=CONNECT TABLE_TYPE=PROXY TABNAME='xt%s'
+PARTITION BY RANGE COLUMNS(id) (
+PARTITION `1` VALUES LESS THAN(10),
+PARTITION `2` VALUES LESS THAN(50),
+PARTITION `3` VALUES LESS THAN(MAXVALUE));
+SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1';
+SELECT * FROM t1;
+DELETE FROM t1;
+--error ER_UNKNOWN_ERROR
+ALTER TABLE t1 ADD INDEX XID(id);
+INSERT INTO t1 VALUES(4, 'four');
+INSERT INTO t1 VALUES(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one');
+INSERT INTO t1 VALUES(72,'seventy two'),(11,'eleven'),(1,'one'),(35,'thirty five'),(8,'eight');
+SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1';
+SELECT * FROM t1;
+EXPLAIN PARTITIONS
+SELECT * FROM t1 WHERE id = 81;
+DELETE FROM t1;
+DROP TABLE t1;
+
+#
+# Based on MYSQL the table is indexable
+#
+CREATE TABLE t1 (
+id INT KEY NOT NULL,
+msg VARCHAR(32))
+ENGINE=CONNECT TABLE_TYPE=MYSQL TABNAME='xt%s'
+PARTITION BY RANGE COLUMNS(id) (
+PARTITION `1` VALUES LESS THAN(10),
+PARTITION `2` VALUES LESS THAN(50),
+PARTITION `3` VALUES LESS THAN(MAXVALUE));
+SHOW INDEX FROM t1;
+INSERT INTO t1 VALUES(4, 'four');
+INSERT INTO t1 VALUES(40, 'forty');
+INSERT INTO t1 VALUES(72,'seventy two');
+INSERT INTO t1 VALUES(7,'seven'),(10,'ten'),(60,'sixty'),(81,'eighty one'),(11,'eleven'),(1,'one'),(35,'thirty five'),(8,'eight');
+SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1';
+SELECT * FROM t1;
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id = 81;
+SELECT * FROM t1 WHERE id = 7;
+SELECT * FROM t1 WHERE id = 35;
+UPDATE t1 SET msg = 'number' WHERE id in (60,72);
+UPDATE t1 SET msg = 'soixante' WHERE id = 60;
+SELECT * FROM t1 WHERE id > 50;
+UPDATE t1 SET msg = 'big' WHERE id > 50;
+UPDATE t1 SET msg = 'sept' WHERE id = 7;
+SELECT * FROM t1;
+DELETE FROM t1 WHERE id in (60,72);
+SELECT * FROM t1;
+DROP TABLE t1;
+DROP TABLE xt1;
+DROP TABLE xt2;
+DROP TABLE xt3;
diff --git a/storage/connect/plgdbsem.h b/storage/connect/plgdbsem.h index 9466bd83bea..1e540dfb2fe 100644 --- a/storage/connect/plgdbsem.h +++ b/storage/connect/plgdbsem.h @@ -28,6 +28,8 @@ enum BLKTYP {TYPE_TABLE = 50, /* Table Name/Srcdef/... Block */ TYPE_COLUMN = 51, /* Column Name/Qualifier Block */ TYPE_TDB = 53, /* Table Description Block */ TYPE_COLBLK = 54, /* Column Description Block */ + TYPE_FILTER = 55, /* Filter Description Block */ + TYPE_ARRAY = 63, /* General array type */ TYPE_PSZ = 64, /* Pointer to String ended by 0 */ TYPE_SQL = 65, /* Pointer to SQL block */ TYPE_XOBJECT = 69, /* Extended DB object */ @@ -83,6 +85,7 @@ enum AMT {TYPE_AM_ERROR = 0, /* Type not defined */ TYPE_AM_SRVID = 5, /* SERVID type (special column) */ TYPE_AM_TABID = 6, /* TABID type (special column) */ TYPE_AM_CNSID = 7, /* CONSTID type (special column) */ + TYPE_AM_PRTID = 8, /* PARTID type (special column) */ TYPE_AM_COUNT = 10, /* CPT AM type no (count table) */ TYPE_AM_DCD = 20, /* Decode access method type no */ TYPE_AM_CMS = 30, /* CMS access method type no */ @@ -371,6 +374,7 @@ typedef class COLDEF *PCOLDEF; typedef class CONSTANT *PCONST; typedef class VALUE *PVAL; typedef class VALBLK *PVBLK; +typedef class FILTER *PFIL; typedef struct _fblock *PFBLOCK; typedef struct _mblock *PMBLOCK; @@ -416,6 +420,7 @@ typedef struct { /* User application block */ PFBLOCK Openlist; /* To file/map open list */ PMBLOCK Memlist; /* To memory block list */ PXUSED Xlist; /* To used index list */ + int Maxbmp; /* Maximum XDB2 bitmap size */ int Check; /* General level of checking */ int Numlines; /* Number of lines involved */ USETEMP UseTemp; /* Use temporary file */ @@ -461,6 +466,21 @@ typedef struct _tabs { } TABS; /***********************************************************************/ +/* Argument of expression, function, filter etc. 
(Xobject) */ +/***********************************************************************/ +typedef struct _arg { /* Argument */ + PXOB To_Obj; /* To the argument object */ + PVAL Value; /* Argument value */ + bool Conv; /* TRUE if conversion is required */ + } ARGBLK, *PARG; + +typedef struct _oper { /* Operator */ + PSZ Name; /* The input/output operator name */ + OPVAL Val; /* Operator numeric value */ + int Mod; /* The modificator */ + } OPER, *POPER; + +/***********************************************************************/ /* Following definitions are used to define table fields (columns). */ /***********************************************************************/ enum XFLD {FLD_NO = 0, /* Not a field definition item */ @@ -530,6 +550,7 @@ PPARM Vcolist(PGLOBAL, PTDB, PSZ, bool); void PlugPutOut(PGLOBAL, FILE *, short, void *, uint); void PlugLineDB(PGLOBAL, PSZ, short, void *, uint); char *PlgGetDataPath(PGLOBAL g); +char *ExtractFromPath(PGLOBAL, char *, char *, OPVAL); void AddPointer(PTABS, void *); PDTP MakeDateFormat(PGLOBAL, PSZ, bool, bool, int); int ExtractDate(char *, PDTP, int, int val[6]); @@ -538,14 +559,15 @@ int ExtractDate(char *, PDTP, int, int val[6]); /* Allocate the result structure that will contain result data. */ /**************************************************************************/ DllExport PQRYRES PlgAllocResult(PGLOBAL g, int ncol, int maxres, int ids, - int *buftyp, XFLD *fldtyp, - unsigned int *length, + int *buftyp, XFLD *fldtyp, + unsigned int *length, bool blank, bool nonull); /***********************************************************************/ /* Exported utility routines. 
*/ /***********************************************************************/ DllExport FILE *PlugOpenFile(PGLOBAL, LPCSTR, LPCSTR); +DllExport FILE *PlugReopenFile(PGLOBAL, PFBLOCK, LPCSTR); DllExport int PlugCloseFile(PGLOBAL, PFBLOCK, bool all = false); DllExport void PlugCleanup(PGLOBAL, bool); DllExport bool GetPromptAnswer(PGLOBAL, char *); @@ -561,7 +583,7 @@ DllExport void *PlgDBrealloc(PGLOBAL, void *, MBLOCK&, size_t); DllExport void NewPointer(PTABS, void *, void *); DllExport char *GetIni(int n= 0); DllExport void SetTrc(void); -DllExport char *GetListOption(PGLOBAL, const char *, const char *, +DllExport char *GetListOption(PGLOBAL, const char *, const char *, const char *def=NULL); #define MSGID_NONE 0 diff --git a/storage/connect/plgdbutl.cpp b/storage/connect/plgdbutl.cpp index c0f7fc1c253..66f7332c56a 100644 --- a/storage/connect/plgdbutl.cpp +++ b/storage/connect/plgdbutl.cpp @@ -336,16 +336,7 @@ PDBUSER PlgMakeUser(PGLOBAL g) } // endif dbuserp memset(dbuserp, 0, sizeof(DBUSERBLK)); -//dbuserp->Act2 = g->Activityp; -//#if defined(UNIX) -// dbuserp->LineLen = 160; -//#else -// dbuserp->LineLen = 78; -//#endif -//dbuserp->Maxres = MAXRES; -//dbuserp->Maxlin = MAXLIN; -//dbuserp->Maxbmp = MAXBMP; -//dbuserp->AlgChoice = AMOD_AUTO; + dbuserp->Maxbmp = MAXBMP; dbuserp->UseTemp = TMP_AUTO; dbuserp->Check = CHK_ALL; strcpy(dbuserp->Server, "CONNECT"); @@ -393,6 +384,31 @@ char *PlgGetDataPath(PGLOBAL g) } // end of PlgGetDataPath /***********************************************************************/ +/* Extract from a path name the required component. */ +/* This function assumes there is enough space in the buffer. 
*/ +/***********************************************************************/ +char *ExtractFromPath(PGLOBAL g, char *pBuff, char *FileName, OPVAL op) + { + char *drive = NULL, *direc = NULL, *fname = NULL, *ftype = NULL; + + switch (op) { // Determine which part to extract +#if !defined(UNIX) + case OP_FDISK: drive = pBuff; break; +#endif // !UNIX + case OP_FPATH: direc = pBuff; break; + case OP_FNAME: fname = pBuff; break; + case OP_FTYPE: ftype = pBuff; break; + default: + sprintf(g->Message, MSG(INVALID_OPER), op, "ExtractFromPath"); + return NULL; + } // endswitch op + + // Now do the extraction + _splitpath(FileName, drive, direc, fname, ftype); + return pBuff; + } // end of PlgExtractFromPath + +/***********************************************************************/ /* Check the occurence and matching of a pattern against a string. */ /* Because this function is only used for catalog name checking, */ /* it must be case insensitive. */ @@ -820,6 +836,23 @@ FILE *PlugOpenFile(PGLOBAL g, LPCSTR fname, LPCSTR ftype) /* Close file routine: the purpose of this routine is to avoid */ /* double closing that freeze the system on some Unix platforms. */ /***********************************************************************/ +FILE *PlugReopenFile(PGLOBAL g, PFBLOCK fp, LPCSTR md) + { + FILE *fop; + + if ((fop = global_fopen(g, MSGID_OPEN_MODE_STRERROR, fp->Fname, md))) { + fp->Count = 1; + fp->Type = TYPE_FB_FILE; + fp->File = fop; + } /* endif fop */ + + return (fop); + } // end of PlugOpenFile + +/***********************************************************************/ +/* Close file routine: the purpose of this routine is to avoid */ +/* double closing that freeze the system on some Unix platforms. 
*/ +/***********************************************************************/ int PlugCloseFile(PGLOBAL g, PFBLOCK fp, bool all) { int rc = 0; diff --git a/storage/connect/plugutil.c b/storage/connect/plugutil.c index 201aa5a4371..c3b77544983 100644 --- a/storage/connect/plugutil.c +++ b/storage/connect/plugutil.c @@ -2,11 +2,11 @@ /* */ /* PROGRAM NAME: PLUGUTIL */ /* ------------- */ -/* Version 2.7 */ +/* Version 2.8 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 1993-2012 */ +/* (C) Copyright to the author Olivier BERTRAND 1993-2014 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -134,7 +134,7 @@ PGLOBAL PlugInit(LPCSTR Language, uint worksize) PGLOBAL g; if (trace > 1) - htrc("PlugInit: Language='%s'\n", + htrc("PlugInit: Language='%s'\n", ((!Language) ? "Null" : (char*)Language)); if (!(g = malloc(sizeof(GLOBAL)))) { @@ -145,6 +145,7 @@ PGLOBAL PlugInit(LPCSTR Language, uint worksize) g->Trace = 0; g->Createas = 0; g->Alchecked = 0; + g->Mrr = 0; g->Activityp = g->ActivityStart = NULL; g->Xchk = NULL; strcpy(g->Message, ""); @@ -298,7 +299,7 @@ LPCSTR PlugSetPath(LPSTR pBuff, LPCSTR prefix, LPCSTR FileName, LPCSTR defpath) case '/': break; default: - // This supposes that defdir ends with a SLASH + // This supposes that defdir ends with a SLASH strcpy(direc, strcat(defdir, direc)); } // endswitch @@ -321,13 +322,13 @@ LPCSTR PlugSetPath(LPSTR pBuff, LPCSTR prefix, LPCSTR FileName, LPCSTR defpath) /***********************************************************************/ /* PlugGetMessage: get a message from the message file. 
*/ /***********************************************************************/ -char *PlugReadMessage(PGLOBAL g, int mid, char *m) +char *PlugReadMessage(PGLOBAL g, int mid, char *m) { char msgfile[_MAX_PATH], msgid[32], buff[256]; char *msg; FILE *mfile = NULL; - GetPrivateProfileString("Message", msglang, "Message\\english.msg", + GetPrivateProfileString("Message", msglang, "Message\\english.msg", msgfile, _MAX_PATH, plgini); if (!(mfile = fopen(msgfile, "rt"))) { @@ -377,7 +378,7 @@ char *PlugReadMessage(PGLOBAL g, int mid, char *m) /***********************************************************************/ /* PlugGetMessage: get a message from the resource string table. */ /***********************************************************************/ -char *PlugGetMessage(PGLOBAL g, int mid) +char *PlugGetMessage(PGLOBAL g, int mid) { char *msg; @@ -442,7 +443,7 @@ void *PlugAllocMem(PGLOBAL g, uint size) htrc("Memory of %u allocated at %p\n", size, areap); else htrc("PlugAllocMem: %s\n", g->Message); - + } // endif trace return (areap); @@ -521,7 +522,7 @@ void *PlugSubAlloc(PGLOBAL g, void *memp, size_t size) /***********************************************************************/ char *PlugDup(PGLOBAL g, const char *str) { - char *buf; + char *buf; size_t len; if (str && (len = strlen(str))) { diff --git a/storage/connect/reldef.cpp b/storage/connect/reldef.cpp index e7a96a12908..58bcbd202f3 100644 --- a/storage/connect/reldef.cpp +++ b/storage/connect/reldef.cpp @@ -1,11 +1,11 @@ /************* RelDef CPP Program Source Code File (.CPP) **************/ /* PROGRAM NAME: REFDEF */ /* ------------- */ -/* Version 1.3 */ +/* Version 1.4 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 2004-2012 */ +/* (C) Copyright to the author Olivier BERTRAND 2004-2014 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -127,24 +127,39 @@ int RELDEF::GetCharCatInfo(PSZ what, PSZ sdef, char *buf, int size) } // end of 
GetCharCatInfo /***********************************************************************/ +/* To be used by any TDB's. */ +/***********************************************************************/ +bool RELDEF::Partitioned(void) + { + return Hc->IsPartitioned(); + } // end of Partitioned + +/***********************************************************************/ /* This function returns string table information. */ /* Default parameter is "*" to get the handler default. */ /***********************************************************************/ char *RELDEF::GetStringCatInfo(PGLOBAL g, PSZ what, PSZ sdef) { - char *sval= NULL, *s= Hc->GetStringOption(what, sdef); + char *name, *sval= NULL, *s= Hc->GetStringOption(what, sdef); if (s) { - sval= (char*)PlugSubAlloc(g, NULL, strlen(s) + 1); - strcpy(sval, s); + if (!Hc->IsPartitioned() || + (stricmp(what, "filename") && stricmp(what, "tabname") + && stricmp(what, "connect"))) { + sval= (char*)PlugSubAlloc(g, NULL, strlen(s) + 1); + strcpy(sval, s); + } else + sval= s; + } else if (!stricmp(what, "filename")) { // Return default file name char *ftype= Hc->GetStringOption("Type", "*"); int i, n; if (IsFileType(GetTypeID(ftype))) { - sval= (char*)PlugSubAlloc(g, NULL, strlen(Hc->GetTableName()) + 12); - strcat(strcpy(sval, Hc->GetTableName()), "."); + name= Hc->GetPartName(); + sval= (char*)PlugSubAlloc(g, NULL, strlen(name) + 12); + strcat(strcpy(sval, name), "."); n= strlen(sval); // Fold ftype to lower case @@ -559,10 +574,8 @@ PTDB OEMDEF::GetTable(PGLOBAL g, MODE mode) #if defined(ZIP_SUPPORT) if (cmpr == 1) txfp = new(g) ZIPFAM(defp); - else { - strcpy(g->Message, "Compress 2 not supported yet"); - return NULL; - } // endelse + else + txfp = new(g) ZLBFAM(defp); #else // !ZIP_SUPPORT strcpy(g->Message, "Compress not supported"); return NULL; @@ -613,8 +626,10 @@ COLCRT::COLCRT(PSZ name) Offset = -1; Long = -1; Precision = -1; + Freq = -1; Key = -1; Scale = -1; + Opt = -1; DataType = '*'; } // end of COLCRT 
constructor for table creation @@ -628,8 +643,10 @@ COLCRT::COLCRT(void) Offset = 0; Long = 0; Precision = 0; + Freq = 0; Key = 0; Scale = 0; + Opt = 0; DataType = '*'; } // end of COLCRT constructor for table & view definition @@ -640,6 +657,14 @@ COLCRT::COLCRT(void) /***********************************************************************/ COLDEF::COLDEF(void) : COLCRT() { + To_Min = NULL; + To_Max = NULL; + To_Pos = NULL; + Xdb2 = FALSE; + To_Bmap = NULL; + To_Dval = NULL; + Ndv = 0; + Nbm = 0; Buf_Type = TYPE_ERROR; Clen = 0; Poff = 0; @@ -671,7 +696,9 @@ int COLDEF::Define(PGLOBAL g, void *memp, PCOLINFO cfp, int poff) Precision = cfp->Precision; Scale = cfp->Scale; Long = cfp->Length; + Opt = cfp->Opt; Key = cfp->Key; + Freq = cfp->Freq; if (cfp->Remark && *cfp->Remark) { Desc = (PSZ)PlugSubAlloc(g, memp, strlen(cfp->Remark) + 1); diff --git a/storage/connect/reldef.h b/storage/connect/reldef.h index 29e4bf77f44..b6bd3cafc30 100644 --- a/storage/connect/reldef.h +++ b/storage/connect/reldef.h @@ -1,7 +1,7 @@ /*************** RelDef H Declares Source Code File (.H) ***************/ -/* Name: RELDEF.H Version 1.3 */ +/* Name: RELDEF.H Version 1.4 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2004-2012 */ +/* (C) Copyright to the author Olivier BERTRAND 2004-2014 */ /* */ /* This file contains the DEF classes definitions. 
*/ /***********************************************************************/ @@ -13,7 +13,7 @@ #include "catalog.h" #include "my_sys.h" -typedef class INDEXDEF *PIXDEF; +typedef class INDEXDEF *PIXDEF; typedef class ha_connect *PHC; /***********************************************************************/ @@ -42,6 +42,7 @@ class DllExport RELDEF : public BLOCK { // Relation definition block // Methods bool GetBoolCatInfo(PSZ what, bool bdef); bool SetIntCatInfo(PSZ what, int ival); + bool Partitioned(void); int GetIntCatInfo(PSZ what, int idef); int GetSizeCatInfo(PSZ what, PSZ sdef); int GetCharCatInfo(PSZ what, PSZ sdef, char *buf, int size); @@ -78,7 +79,7 @@ class DllExport TABDEF : public RELDEF { /* Logical table descriptor */ void SetNext(PTABDEF tdfp) {Next = tdfp;} int GetMultiple(void) {return Multiple;} int GetPseudo(void) {return Pseudo;} - PSZ GetPath(void) + PSZ GetPath(void) {return (Database) ? (PSZ)Database : Cat->GetDataPath();} bool SepIndex(void) {return GetBoolCatInfo("SepIndex", false);} bool IsReadOnly(void) {return Read_Only;} @@ -89,7 +90,7 @@ class DllExport TABDEF : public RELDEF { /* Logical table descriptor */ const CHARSET_INFO *data_charset() {return m_data_charset;} // Methods - int GetColCatInfo(PGLOBAL g); + int GetColCatInfo(PGLOBAL g); void SetIndexInfo(void); bool DropTable(PGLOBAL g, PSZ name); virtual bool Define(PGLOBAL g, PCATLG cat, LPCSTR name, LPCSTR am); @@ -156,6 +157,8 @@ class DllExport COLCRT : public BLOCK { /* Column description block PSZ GetName(void) {return Name;} PSZ GetDecode(void) {return Decode;} PSZ GetFmt(void) {return Fmt;} + int GetOpt(void) {return Opt;} + int GetFreq(void) {return Freq;} int GetLong(void) {return Long;} int GetPrecision(void) {return Precision;} int GetOffset(void) {return Offset;} @@ -172,6 +175,8 @@ class DllExport COLCRT : public BLOCK { /* Column description block int Key; /* Key (greater than 1 if multiple) */ int Precision; /* Logical column length */ int Scale; /* Decimals for 
float/decimal values */ + int Opt; /* 0:Not 1:clustered 2:sorted-asc 3:desc */ + int Freq; /* Estimated number of different values */ char DataType; /* Internal data type (C, N, F, T) */ }; // end of COLCRT @@ -193,12 +198,34 @@ class DllExport COLDEF : public COLCRT { /* Column description block int GetClen(void) {return Clen;} int GetType(void) {return Buf_Type;} int GetPoff(void) {return Poff;} + void *GetMin(void) {return To_Min;} + void SetMin(void *minp) {To_Min = minp;} + void *GetMax(void) {return To_Max;} + void SetMax(void *maxp) {To_Max = maxp;} + bool GetXdb2(void) {return Xdb2;} + void SetXdb2(bool b) {Xdb2 = b;} + void *GetBmap(void) {return To_Bmap;} + void SetBmap(void *bmp) {To_Bmap = bmp;} + void *GetDval(void) {return To_Dval;} + void SetDval(void *dvp) {To_Dval = dvp;} + int GetNdv(void) {return Ndv;} + void SetNdv(int ndv) {Ndv = ndv;} + int GetNbm(void) {return Nbm;} + void SetNbm(int nbm) {Nbm = nbm;} int Define(PGLOBAL g, void *memp, PCOLINFO cfp, int poff); void Define(PGLOBAL g, PCOL colp); bool IsSpecial(void) {return (Flags & U_SPECIAL) ? true : false;} bool IsVirtual(void) {return (Flags & U_VIRTUAL) ? true : false;} protected: + void *To_Min; /* Point to array of block min values */ + void *To_Max; /* Point to array of block max values */ + int *To_Pos; /* Point to array of block positions */ + bool Xdb2; /* TRUE if to be optimized by XDB2 */ + void *To_Bmap; /* To array of block bitmap values */ + void *To_Dval; /* To array of column distinct values */ + int Ndv; /* Number of distinct values */ + int Nbm; /* Number of ULONG in bitmap (XDB2) */ int Buf_Type; /* Internal data type */ int Clen; /* Internal data size in chars (bytes) */ int Poff; /* Calculated offset for Packed tables */ diff --git a/storage/connect/tabcol.h b/storage/connect/tabcol.h index fdee653207e..3bfc37e69c1 100644 --- a/storage/connect/tabcol.h +++ b/storage/connect/tabcol.h @@ -97,7 +97,7 @@ class DllExport COLUMN: public XOBJECT { // Column Name/Qualifier block. 
/***********************************************************************/ /* Definition of class SPCCOL with all its method functions. */ /* Note: Currently the special columns are ROWID, ROWNUM, FILEID, */ -/* SERVID, TABID, and CONID. */ +/* SERVID, TABID, PARTID, and CONID. */ /***********************************************************************/ class SPCCOL: public COLUMN { // Special Column Name/Qualifier block. public: diff --git a/storage/connect/tabdos.cpp b/storage/connect/tabdos.cpp index e66a84f2fa4..4a04b0c2db4 100644 --- a/storage/connect/tabdos.cpp +++ b/storage/connect/tabdos.cpp @@ -57,6 +57,8 @@ #include "tabdos.h" #include "tabfix.h" #include "tabmul.h" +#include "array.h" +#include "blkfil.h" /***********************************************************************/ /* DB static variables. */ @@ -64,6 +66,18 @@ int num_read, num_there, num_eq[2]; // Statistics extern "C" int trace; +/***********************************************************************/ +/* Size of optimize file header. */ +/***********************************************************************/ +#define NZ 4 + +/***********************************************************************/ +/* Min and Max blocks contains zero ended fields (blank = false). */ +/* No conversion of block values (check = true). 
*/ +/***********************************************************************/ +PVBLK AllocValBlock(PGLOBAL, void *, int, int, int len = 0, int prec = 0, + bool check = true, bool blank = false, bool un = false); + /* --------------------------- Class DOSDEF -------------------------- */ /***********************************************************************/ @@ -81,6 +95,9 @@ DOSDEF::DOSDEF(void) Huge = false; Accept = false; Eof = false; + To_Pos = NULL; + Optimized = 0; + AllocBlks = 0; Compressed = 0; Lrecl = 0; AvgLen = 0; @@ -90,7 +107,6 @@ DOSDEF::DOSDEF(void) Maxerr = 0; ReadMode = 0; Ending = 0; -//Mtime = 0; } // end of DOSDEF constructor /***********************************************************************/ @@ -116,8 +132,8 @@ bool DOSDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) Compressed = GetIntCatInfo("Compressed", 0); Mapped = GetBoolCatInfo("Mapped", map); - Block = GetIntCatInfo("Blocks", 0); - Last = GetIntCatInfo("Last", 0); +//Block = GetIntCatInfo("Blocks", 0); +//Last = GetIntCatInfo("Last", 0); Ending = GetIntCatInfo("Ending", CRLF); if (Recfm == RECFM_FIX || Recfm == RECFM_BIN) { @@ -137,46 +153,62 @@ bool DOSDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) return false; } // end of DefineAM -#if 0 /***********************************************************************/ -/* DeleteTableFile: Delete DOS/UNIX table files using platform API. */ -/* If the table file is protected (declared as read/only) we still */ -/* erase the the eventual optimize and index files but return true. */ +/* Get the full path/name of the optization file. 
*/ /***********************************************************************/ -bool DOSDEF::DeleteTableFile(PGLOBAL g) +bool DOSDEF::GetOptFileName(PGLOBAL g, char *filename) { - char filename[_MAX_PATH]; - bool rc = false; + char *ftype; - // Now delete the table file itself if not protected - if (!IsReadOnly()) { - rc = Erase(filename); - } else - rc =true; + switch (Recfm) { + case RECFM_VAR: ftype = ".dop"; break; + case RECFM_FIX: ftype = ".fop"; break; + case RECFM_BIN: ftype = ".bop"; break; + case RECFM_VCT: ftype = ".vop"; break; + case RECFM_DBF: ftype = ".dbp"; break; + default: + sprintf(g->Message, MSG(INVALID_FTYPE), Recfm); + return true; + } // endswitch Ftype - return rc; // Return true if error - } // end of DeleteTableFile + PlugSetPath(filename, Ofn, GetPath()); + strcat(PlugRemoveType(filename, filename), ftype); + return false; + } // end of GetOptFileName /***********************************************************************/ -/* Erase: This was made a separate routine because a strange thing */ -/* happened when DeleteTablefile was defined for the VCTDEF class: */ -/* when called from Catalog, the DOSDEF routine was still called even */ -/* when the class was VCTDEF. It also minimizes the specific code. */ +/* After an optimize error occured, remove all set optimize values. 
*/ /***********************************************************************/ -bool DOSDEF::Erase(char *filename) +void DOSDEF::RemoveOptValues(PGLOBAL g) { - bool rc; - - PlugSetPath(filename, Fn, GetPath()); + char filename[_MAX_PATH]; + PCOLDEF cdp; + + // Delete settings of optimized columns + for (cdp = To_Cols; cdp; cdp = cdp->GetNext()) + if (cdp->GetOpt()) { + cdp->SetMin(NULL); + cdp->SetMax(NULL); + cdp->SetNdv(0); + cdp->SetNbm(0); + cdp->SetDval(NULL); + cdp->SetBmap(NULL); + } // endif Opt + + // Delete block position setting for not fixed tables + To_Pos = NULL; + AllocBlks = 0; + + // Delete any eventually ill formed non matching optimization file + if (!GetOptFileName(g, filename)) #if defined(WIN32) - rc = !DeleteFile(filename); + DeleteFile(filename); #else // UNIX - rc = remove(filename); -#endif // UNIX + remove(filename); +#endif // WIN32 - return rc; // Return true if error - } // end of Erase -#endif // 0 + Optimized = 0; + } // end of RemoveOptValues /***********************************************************************/ /* DeleteIndexFile: Delete DOS/UNIX index file(s) using platform API. 
*/ @@ -191,7 +223,7 @@ bool DOSDEF::DeleteIndexFile(PGLOBAL g, PIXDEF pxdf) return false; // No index // If true indexes are in separate files - sep = GetBoolCatInfo("SepIndex", false); + sep = GetBoolCatInfo("SepIndex", false); if (!sep && pxdf) { strcpy(g->Message, MSG(NO_RECOV_SPACE)); @@ -221,6 +253,10 @@ bool DOSDEF::DeleteIndexFile(PGLOBAL g, PIXDEF pxdf) #endif char direc[_MAX_DIR]; char fname[_MAX_FNAME]; + bool all = !pxdf; + + if (all) + pxdf = To_Indx; for (; pxdf; pxdf = pxdf->GetNext()) { _splitpath(Ofn, drive, direc, fname, NULL); @@ -228,10 +264,16 @@ bool DOSDEF::DeleteIndexFile(PGLOBAL g, PIXDEF pxdf) _makepath(filename, drive, direc, fname, ftype); PlugSetPath(filename, filename, GetPath()); #if defined(WIN32) - rc |= !DeleteFile(filename); + if (!DeleteFile(filename)) + rc |= (GetLastError() != ERROR_FILE_NOT_FOUND); #else // UNIX - rc |= remove(filename); + if (remove(filename)) + rc |= (errno != ENOENT); #endif // UNIX + + if (!all) + break; + } // endfor pxdf } else { // !sep @@ -239,9 +281,11 @@ bool DOSDEF::DeleteIndexFile(PGLOBAL g, PIXDEF pxdf) PlugSetPath(filename, Ofn, GetPath()); strcat(PlugRemoveType(filename, filename), ftype); #if defined(WIN32) - rc = !DeleteFile(filename); + if (!DeleteFile(filename)) + rc = (GetLastError() != ERROR_FILE_NOT_FOUND); #else // UNIX - rc = remove(filename); + if (remove(filename)) + rc = (errno != ENOENT); #endif // UNIX } // endif sep @@ -314,10 +358,9 @@ PTDB DOSDEF::GetTable(PGLOBAL g, MODE mode) #if defined(ZIP_SUPPORT) if (Compressed == 1) txfp = new(g) ZIPFAM(this); - else { - strcpy(g->Message, "Compress 2 not supported yet"); - return NULL; - } // endelse + else + txfp = new(g) ZLBFAM(this); + #else // !ZIP_SUPPORT sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP"); return NULL; @@ -334,6 +377,36 @@ PTDB DOSDEF::GetTable(PGLOBAL g, MODE mode) if (Multiple) tdbp = new(g) TDBMUL(tdbp); + else + /*******************************************************************/ + /* For block tables, get 
eventually saved optimization values. */ + /*******************************************************************/ + if (tdbp->GetBlockValues(g)) { + PushWarning(g, tdbp); +// return NULL; // causes a crash when deleting index + } else if (Recfm == RECFM_VAR || Compressed > 1) { + if (IsOptimized()) { + if (map) { + txfp = new(g) MBKFAM(this); + } else if (Compressed) { +#if defined(ZIP_SUPPORT) + if (Compressed == 1) + txfp = new(g) ZBKFAM(this); + else { + txfp->SetBlkPos(To_Pos); + ((PZLBFAM)txfp)->SetOptimized(To_Pos != NULL); + } // endelse +#else + sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP"); + return NULL; +#endif + } else + txfp = new(g) BLKFAM(this); + + ((PTDBDOS)tdbp)->SetTxfp(txfp); + } // endif Optimized + + } // endif Recfm return tdbp; } // end of GetTable @@ -353,7 +426,12 @@ TDBDOS::TDBDOS(PDOSDEF tdp, PTXF txfp) : TDBASE(tdp) AvgLen = tdp->AvgLen; Ftype = tdp->Recfm; To_Line = NULL; - Cardinal = -1; +//To_BlkIdx = NULL; + To_BlkFil = NULL; + SavFil = NULL; +//Xeval = 0; + Beval = 0; + Abort = false; } // end of TDBDOS standard constructor TDBDOS::TDBDOS(PGLOBAL g, PTDBDOS tdbp) : TDBASE(tdbp) @@ -363,7 +441,11 @@ TDBDOS::TDBDOS(PGLOBAL g, PTDBDOS tdbp) : TDBASE(tdbp) AvgLen = tdbp->AvgLen; Ftype = tdbp->Ftype; To_Line = tdbp->To_Line; - Cardinal = tdbp->Cardinal; +//To_BlkIdx = tdbp->To_BlkIdx; + To_BlkFil = tdbp->To_BlkFil; + SavFil = tdbp->SavFil; +//Xeval = tdbp->Xeval; + Beval = tdbp->Beval; } // end of TDBDOS copy constructor // Method @@ -406,14 +488,59 @@ void TDBDOS::PrintAM(FILE *f, char *m) /***********************************************************************/ /* Remake the indexes after the table was modified. 
*/ /***********************************************************************/ -int TDBDOS::ResetTableOpt(PGLOBAL g, bool dox) +int TDBDOS::ResetTableOpt(PGLOBAL g, bool dop, bool dox) { - int rc = RC_OK; + int prc = RC_OK, rc = RC_OK; + + if (!GetFileLength(g)) { + // Void table, delete all opt and index files + PDOSDEF defp = (PDOSDEF)To_Def; + + defp->RemoveOptValues(g); + return (defp->DeleteIndexFile(g, NULL)) ? RC_INFO : RC_OK; + } // endif GetFileLength MaxSize = -1; // Size must be recalculated Cardinal = -1; // as well as Cardinality - if (dox) { + PTXF xp = Txfp; + + To_Filter = NULL; // Disable filtering +//To_BlkIdx = NULL; // and index filtering + To_BlkFil = NULL; // and block filtering + + // After the table was modified the indexes + // are invalid and we should mark them as such... + (void)((PDOSDEF)To_Def)->InvalidateIndex(g); + + if (dop) { + Columns = NULL; // Not used anymore + + if (Txfp->Blocked) { + // MakeBlockValues must be executed in non blocked mode + // except for ZLIB access method. 
+ if (Txfp->GetAmType() == TYPE_AM_MAP) { + Txfp = new(g) MAPFAM((PDOSDEF)To_Def); +#if defined(ZIP_SUPPORT) + } else if (Txfp->GetAmType() == TYPE_AM_ZIP) { + Txfp = new(g) ZIPFAM((PDOSDEF)To_Def); + } else if (Txfp->GetAmType() == TYPE_AM_ZLIB) { + Txfp->Reset(); + ((PZLBFAM)Txfp)->SetOptimized(false); +#endif // ZIP_SUPPORT + } else if (Txfp->GetAmType() == TYPE_AM_BLK) + Txfp = new(g) DOSFAM((PDOSDEF)To_Def); + + Txfp->SetTdbp(this); + } else + Txfp->Reset(); + + Use = USE_READY; // So the table can be reopened + Mode = MODE_ANY; // Just to be clean + rc = MakeBlockValues(g); // Redo optimization + } // endif dop + + if (dox && (rc == RC_OK || rc == RC_INFO)) { // Remake eventual indexes if (Mode != MODE_UPDATE) To_SetCols = NULL; // Only used on Update @@ -422,21 +549,1025 @@ int TDBDOS::ResetTableOpt(PGLOBAL g, bool dox) Txfp->Reset(); // New start Use = USE_READY; // So the table can be reopened Mode = MODE_READ; // New mode + prc = rc; - if (!(PlgGetUser(g)->Check & CHK_OPT)) { - // After the table was modified the indexes - // are invalid and we should mark them as such... - rc = ((PDOSDEF)To_Def)->InvalidateIndex(g); - } else - // ... or we should remake them. + if (PlgGetUser(g)->Check & CHK_OPT) + // We must remake all indexes. rc = MakeIndex(g, NULL, false); + rc = (rc == RC_INFO) ? prc : rc; } // endif dox return rc; } // end of ResetTableOpt /***********************************************************************/ +/* Calculate the block sizes so block I/O can be used and also the */ +/* Min/Max values for clustered/sorted table columns. 
*/ +/***********************************************************************/ +int TDBDOS::MakeBlockValues(PGLOBAL g) + { + int i, lg, nrec, rc, n = 0; + int curnum, curblk, block, last, savndv, savnbm; + void *savmin, *savmax; + bool blocked, xdb2 = false; +//POOLHEADER save; + PCOLDEF cdp; + PDOSDEF defp = (PDOSDEF)To_Def; + PDOSCOL colp = NULL; + PDBUSER dup = PlgGetUser(g); + PCATLG cat = defp->GetCat(); +//void *memp = cat->GetDescp(); + + if ((nrec = defp->GetElemt()) < 2) { + if (!To_Def->Partitioned()) { + // This may be wrong to do in some cases + strcpy(g->Message, MSG(TABLE_NOT_OPT)); + return RC_INFO; // Not to be optimized + } else + return RC_OK; + + } else if (GetMaxSize(g) == 0 || !(dup->Check & CHK_OPT)) { + // Suppress the opt file firstly if the table is void, + // secondly when it was modified with OPTIMIZATION unchecked + // because it is no more valid. + defp->RemoveOptValues(g); // Erase opt file + return RC_OK; // void table + } else if (MaxSize < 0) + return RC_FX; + + defp->SetOptimized(0); + + // Estimate the number of needed blocks + block = (int)((MaxSize + (int)nrec - 1) / (int)nrec); + + // We have to use local variables because Txfp->CurBlk is set + // to Rows+1 by unblocked variable length table access methods. + curblk = -1; + curnum = nrec - 1; + last = 0; + Txfp->Block = block; // This is useful mainly for + Txfp->CurBlk = curblk; // blocked tables (ZLBFAM), for + Txfp->CurNum = curnum; // others it is just to be clean. + + /*********************************************************************/ + /* Allocate the array of block starting positions. */ + /*********************************************************************/ +//if (memp) +// save = *(PPOOLHEADER)memp; + + Txfp->BlkPos = (int*)PlugSubAlloc(g, NULL, (block + 1) * sizeof(int)); + + /*********************************************************************/ + /* Allocate the blocks for clustered columns. 
*/ + /*********************************************************************/ + blocked = Txfp->Blocked; // Save + Txfp->Blocked = true; // So column block can be allocated + + for (cdp = defp->GetCols(), i = 1; cdp; cdp = cdp->GetNext(), i++) + if (cdp->GetOpt()) { + lg = cdp->GetClen(); + + if (cdp->GetFreq() && cdp->GetFreq() <= dup->Maxbmp) { + cdp->SetXdb2(true); + savndv = cdp->GetNdv(); + cdp->SetNdv(0); // Reset Dval number of values + xdb2 = true; + savmax = cdp->GetDval(); + cdp->SetDval(PlugSubAlloc(g, NULL, cdp->GetFreq() * lg)); + savnbm = cdp->GetNbm(); + cdp->SetNbm(0); // Prevent Bmap allocation +// savmin = cdp->GetBmap(); +// cdp->SetBmap(PlugSubAlloc(g, NULL, block * sizeof(int))); + + if (trace) + htrc("Dval(%p) Bmap(%p) col(%d) %s Block=%d lg=%d\n", + cdp->GetDval(), cdp->GetBmap(), i, cdp->GetName(), block, lg); + + // colp will be initialized with proper Dval VALBLK + colp = (PDOSCOL)MakeCol(g, cdp, colp, i); + colp->InitValue(g); // Allocate column value buffer + cdp->SetNbm(savnbm); +// cdp->SetBmap(savmin); // Can be reused if the new size + cdp->SetDval(savmax); // is not greater than this one. + cdp->SetNdv(savndv); + } else { + cdp->SetXdb2(false); // Maxbmp may have been reset + savmin = cdp->GetMin(); + savmax = cdp->GetMax(); + cdp->SetMin(PlugSubAlloc(g, NULL, block * lg)); + cdp->SetMax(PlugSubAlloc(g, NULL, block * lg)); + + if (trace) + htrc("min(%p) max(%p) col(%d) %s Block=%d lg=%d\n", + cdp->GetMin(), cdp->GetMax(), i, cdp->GetName(), block, lg); + + // colp will be initialized with proper opt VALBLK's + colp = (PDOSCOL)MakeCol(g, cdp, colp, i); + colp->InitValue(g); // Allocate column value buffer + cdp->SetMin(savmin); // Can be reused if the number + cdp->SetMax(savmax); // of blocks does not change. + } // endif Freq + + } // endif Clustered + + // No optimised columns. Still useful for blocked variable tables. 
+ if (!colp && defp->Recfm != RECFM_VAR) { + strcpy(g->Message, "No optimised columns"); + return RC_INFO; + } // endif colp + + Txfp->Blocked = blocked; + + /*********************************************************************/ + /* Now do calculate the optimization values. */ + /*********************************************************************/ + Mode = MODE_READ; + + if (OpenDB(g)) + return RC_FX; + + if (xdb2) { + /*********************************************************************/ + /* Retrieve the distinct values of XDB2 columns. */ + /*********************************************************************/ + if (GetDistinctColumnValues(g, nrec)) + return RC_FX; + + OpenDB(g); // Rewind the table file + } // endif xdb2 + +#if defined(PROG_INFO) + /*********************************************************************/ + /* Initialize progress information */ + /*********************************************************************/ + char *p = (char *)PlugSubAlloc(g, NULL, 24 + strlen(Name)); + + dup->Step = strcat(strcpy(p, MSG(OPTIMIZING)), Name); + dup->ProgMax = GetProgMax(g); + dup->ProgCur = 0; +#endif // SOCKET_MODE || THREAD + + /*********************************************************************/ + /* Make block starting pos and min/max values of cluster columns. 
*/ + /*********************************************************************/ + while ((rc = ReadDB(g)) == RC_OK) { + if (blocked) { + // A blocked FAM class handles CurNum and CurBlk (ZLBFAM) + if (!Txfp->CurNum) + Txfp->BlkPos[Txfp->CurBlk] = Txfp->GetPos(); + + } else { + if (++curnum >= nrec) { + if (++curblk >= block) { + strcpy(g->Message, MSG(BAD_BLK_ESTIM)); + goto err; + } else + curnum = 0; + + // Get block starting position + Txfp->BlkPos[curblk] = Txfp->GetPos(); + } // endif CurNum + + last = curnum + 1; // curnum is zero based + Txfp->CurBlk = curblk; // Used in COLDOS::SetMinMax + Txfp->CurNum = curnum; // Used in COLDOS::SetMinMax + } // endif blocked + + /*******************************************************************/ + /* Now calculate the min and max values for the cluster columns. */ + /*******************************************************************/ + for (colp = (PDOSCOL)Columns; colp; colp = (PDOSCOL)colp->GetNext()) + if (colp->Clustered == 2) { + if (colp->SetBitMap(g)) + goto err; + + } else + if (colp->SetMinMax(g)) + goto err; // Currently: column is not sorted + +#if defined(PROG_INFO) + if (!dup->Step) { + strcpy(g->Message, MSG(OPT_CANCELLED)); + goto err; + } else + dup->ProgCur = GetProgCur(); +#endif // PROG_INFO + + n++; // Used to calculate block and last + } // endwhile + + if (rc == RC_EF) { + Txfp->Nrec = nrec; + +#if 0 // No good because Curblk and CurNum after EOF are different + // depending on whether the file is mapped or not mapped. + if (blocked) { +// Txfp->Block = Txfp->CurBlk + 1; + Txfp->Last = (Txfp->CurNum) ? Txfp->CurNum : nrec; +// Txfp->Last = (Txfp->CurNum) ? Txfp->CurNum + 1 : nrec; + Txfp->Block = Txfp->CurBlk + (Txfp->Last == nrec ? 0 : 1); + } else { + Txfp->Block = curblk + 1; + Txfp->Last = last; + } // endif blocked +#endif // 0 + + // New values of Block and Last + Txfp->Block = (n + nrec - 1) / nrec; + Txfp->Last = (n % nrec) ? 
(n % nrec) : nrec; + + // This is needed to be able to calculate the last block size + Txfp->BlkPos[Txfp->Block] = Txfp->GetNextPos(); + } else + goto err; + + /*********************************************************************/ + /* Save the optimization values for this table. */ + /*********************************************************************/ + if (!SaveBlockValues(g)) { + defp->Block = Txfp->Block; + defp->Last = Txfp->Last; + CloseDB(g); + defp->SetIntCatInfo("Blocks", Txfp->Block); + defp->SetIntCatInfo("Last", Txfp->Last); + return RC_OK; + } // endif SaveBlockValues + + err: + // Restore Desc memory suballocation +//if (memp) +// *(PPOOLHEADER)memp = save; + + defp->RemoveOptValues(g); + CloseDB(g); + return RC_FX; + } // end of MakeBlockValues + +/***********************************************************************/ +/* Save the block and Min/Max values for this table. */ +/* The problem here is to avoid name duplication, because more than */ +/* one data file can have the same name (but different types) and/or */ +/* the same data file can be used with different block sizes. This is */ +/* why we use Ofn that defaults to the file name but can be set to a */ +/* different name if necessary. 
*/ +/***********************************************************************/ +bool TDBDOS::SaveBlockValues(PGLOBAL g) + { + char filename[_MAX_PATH]; + int lg, n[NZ + 2]; + size_t nbk, ndv, nbm, block = Txfp->Block; + bool rc = false; + FILE *opfile; + PDOSCOL colp; + PDOSDEF defp = (PDOSDEF)To_Def; + + if (defp->GetOptFileName(g, filename)) + return true; + + if (!(opfile = fopen(filename, "wb"))) { + sprintf(g->Message, MSG(OPEN_MODE_ERROR), + "wb", (int)errno, filename); + strcat(strcat(g->Message, ": "), strerror(errno)); + + if (trace) + htrc("%s\n", g->Message); + + return true; + } // endif opfile + + if (Ftype == RECFM_VAR || defp->Compressed == 2) { + /*******************************************************************/ + /* Write block starting positions into the opt file. */ + /*******************************************************************/ + block++; + lg = sizeof(int); + n[0] = Txfp->Last; n[1] = lg; n[2] = Txfp->Nrec; n[3] = Txfp->Block; + + if (fwrite(n, sizeof(int), NZ, opfile) != NZ) { + sprintf(g->Message, MSG(OPT_HEAD_WR_ERR), strerror(errno)); + rc = true; + } // endif size + + if (fwrite(Txfp->BlkPos, lg, block, opfile) != block) { + sprintf(g->Message, MSG(OPTBLK_WR_ERR), strerror(errno)); + rc = true; + } // endif size + + block--; // = Txfp->Block; + } // endif Ftype + + /*********************************************************************/ + /* Write the Min/Max values into the opt file. */ + /*********************************************************************/ + for (colp = (PDOSCOL)Columns; colp; colp = (PDOSCOL)colp->Next) { + lg = colp->Value->GetClen(); + + // Now start the writing process + if (colp->Clustered == 2) { + // New XDB2 block optimization. Will be recognized when reading + // because the column index is negated. 
+ ndv = colp->Ndv; nbm = colp->Nbm; + nbk = nbm * block; + n[0] = -colp->Index; n[1] = lg; n[2] = Txfp->Nrec; n[3] = block; + n[4] = ndv; n[5] = nbm; + + if (fwrite(n, sizeof(int), NZ + 2, opfile) != NZ + 2) { + sprintf(g->Message, MSG(OPT_HEAD_WR_ERR), strerror(errno)); + rc = true; + } // endif size + + if (fwrite(colp->Dval->GetValPointer(), lg, ndv, opfile) != ndv) { + sprintf(g->Message, MSG(OPT_DVAL_WR_ERR), strerror(errno)); + rc = true; + } // endif size + + if (fwrite(colp->Bmap->GetValPointer(), sizeof(int), nbk, opfile) != nbk) { + sprintf(g->Message, MSG(OPT_BMAP_WR_ERR), strerror(errno)); + rc = true; + } // endif size + + } else { + n[0] = colp->Index; n[1] = lg; n[2] = Txfp->Nrec; n[3] = block; + + if (fwrite(n, sizeof(int), NZ, opfile) != NZ) { + sprintf(g->Message, MSG(OPT_HEAD_WR_ERR), strerror(errno)); + rc = true; + } // endif size + + if (fwrite(colp->Min->GetValPointer(), lg, block, opfile) != block) { + sprintf(g->Message, MSG(OPT_MIN_WR_ERR), strerror(errno)); + rc = true; + } // endif size + + if (fwrite(colp->Max->GetValPointer(), lg, block, opfile) != block) { + sprintf(g->Message, MSG(OPT_MAX_WR_ERR), strerror(errno)); + rc = true; + } // endif size + + } // endif Clustered + + } // endfor colp + + fclose(opfile); + return rc; + } // end of SaveBlockValues + +/***********************************************************************/ +/* Read the Min/Max values for this table. */ +/* The problem here is to avoid name duplication, because more than */ +/* one data file can have the same name (but different types) and/or */ +/* the same data file can be used with different block sizes. This is */ +/* why we use Ofn that defaults to the file name but can be set to a */ +/* different name if necessary. 
*/ +/***********************************************************************/ +bool TDBDOS::GetBlockValues(PGLOBAL g) + { + char filename[_MAX_PATH]; + int i, lg, n[NZ]; + int nrec, block = 0, last = 0, allocblk = 0; + int len; + bool newblk = false; + size_t ndv, nbm, nbk, blk; + FILE *opfile; + PCOLDEF cdp; + PDOSDEF defp = (PDOSDEF)To_Def; + PCATLG cat = defp->GetCat(); + +#if 0 + if (Mode == MODE_INSERT && Txfp->GetAmType() == TYPE_AM_DOS) + return false; +#endif // WIN32 + + if (defp->Optimized) + return false; // Already done or to be redone + + if (Ftype == RECFM_VAR || defp->Compressed == 2) { + /*******************************************************************/ + /* Variable length file that can be read by block. */ + /*******************************************************************/ + nrec = (defp->GetElemt()) ? defp->GetElemt() : 1; + + if (nrec > 1) { + // The table can be declared optimized if it is void. + // This is useful to handle Insert in optimized mode. + char filename[_MAX_PATH]; + int h; + int flen = -1; + + PlugSetPath(filename, defp->Fn, GetPath()); + h = open(filename, O_RDONLY); + flen = (h == -1 && errno == ENOENT) ? 0 : _filelength(h); + + if (h != -1) + close(h); + + if (!flen) { + defp->SetOptimized(1); + return false; + } // endif flen + + } else + return false; // Not optimisable + + cdp = defp->GetCols(); + i = 1; + } else { + /*******************************************************************/ + /* Fixed length file. Opt file exists only for clustered columns. 
*/ + /*******************************************************************/ + // Check for existence of clustered columns + for (cdp = defp->GetCols(), i = 1; cdp; cdp = cdp->GetNext(), i++) + if (cdp->GetOpt()) + break; + + if (!cdp) + return false; // No optimization needed + + if ((len = Cardinality(g)) < 0) + return true; // Table error + else if (!len) + return false; // File does not exist yet + + block = Txfp->Block; // Was set in Cardinality + nrec = Txfp->Nrec; + } // endif Ftype + + if (defp->GetOptFileName(g, filename)) + return true; + + if (!(opfile = fopen(filename, "rb"))) + return false; // No saved values + + if (Ftype == RECFM_VAR || defp->Compressed == 2) { + /*******************************************************************/ + /* Read block starting positions from the opt file. */ + /*******************************************************************/ + lg = sizeof(int); + + if (fread(n, sizeof(int), NZ, opfile) != NZ) { + sprintf(g->Message, MSG(OPT_HEAD_RD_ERR), strerror(errno)); + goto err; + } // endif size + + if (n[1] != lg || n[2] != nrec) { + sprintf(g->Message, MSG(OPT_NOT_MATCH), filename); + goto err; + } // endif + + last = n[0]; + block = n[3]; + blk = block + 1; + + defp->To_Pos = (int*)PlugSubAlloc(g, NULL, blk * lg); + + if (fread(defp->To_Pos, lg, blk, opfile) != blk) { + sprintf(g->Message, MSG(OPTBLK_RD_ERR), strerror(errno)); + goto err; + } // endif size + + } // endif Ftype + + /*********************************************************************/ + /* Read the Min/Max values from the opt file. */ + /*********************************************************************/ + for (; cdp; cdp = cdp->GetNext(), i++) + if (cdp->GetOpt()) { + lg = cdp->GetClen(); + blk = block; + + // Now start the reading process. 
+ if (fread(n, sizeof(int), NZ, opfile) != NZ) { + sprintf(g->Message, MSG(OPT_HEAD_RD_ERR), strerror(errno)); + goto err; + } // endif size + + if (n[0] == -i) { + // Read the XDB2 opt values from the opt file + if (n[1] != lg || n[2] != nrec || n[3] != block) { + sprintf(g->Message, MSG(OPT_NOT_MATCH), filename); + goto err; + } // endif + + if (fread(n, sizeof(int), 2, opfile) != 2) { + sprintf(g->Message, MSG(OPT_HEAD_RD_ERR), strerror(errno)); + goto err; + } // endif fread + + ndv = n[0]; nbm = n[1]; nbk = nbm * blk; + + if (cdp->GetNdv() < (int)ndv || !cdp->GetDval()) + cdp->SetDval(PlugSubAlloc(g, NULL, ndv * lg)); + + cdp->SetNdv((int)ndv); + + if (fread(cdp->GetDval(), lg, ndv, opfile) != ndv) { + sprintf(g->Message, MSG(OPT_DVAL_RD_ERR), strerror(errno)); + goto err; + } // endif size + + if (newblk || cdp->GetNbm() < (int)nbm || !cdp->GetBmap()) + cdp->SetBmap(PlugSubAlloc(g, NULL, nbk * sizeof(int))); + + cdp->SetNbm((int)nbm); + + if (fread(cdp->GetBmap(), sizeof(int), nbk, opfile) != nbk) { + sprintf(g->Message, MSG(OPT_BMAP_RD_ERR), strerror(errno)); + goto err; + } // endif size + + cdp->SetXdb2(true); + } else { + // Read the Min/Max values from the opt file + if (n[0] != i || n[1] != lg || n[2] != nrec || n[3] != block) { + sprintf(g->Message, MSG(OPT_NOT_MATCH), filename); + goto err; + } // endif + + if (newblk || !cdp->GetMin()) + cdp->SetMin(PlugSubAlloc(g, NULL, blk * lg)); + + if (fread(cdp->GetMin(), lg, blk, opfile) != blk) { + sprintf(g->Message, MSG(OPT_MIN_RD_ERR), strerror(errno)); + goto err; + } // endif size + + if (newblk || !cdp->GetMax()) + cdp->SetMax(PlugSubAlloc(g, NULL, blk * lg)); + + if (fread(cdp->GetMax(), lg, blk, opfile) != blk) { + sprintf(g->Message, MSG(OPT_MAX_RD_ERR), strerror(errno)); + goto err; + } // endif size + + cdp->SetXdb2(false); + } // endif n[0] (XDB2) + + } // endif Clustered + + defp->SetBlock(block); + defp->Last = last; // For Cardinality + defp->SetAllocBlks(block); + defp->SetOptimized(1); + 
fclose(opfile); + MaxSize = -1; // Can be refined later + return false; + + err: + defp->RemoveOptValues(g); + fclose(opfile); + + // Ignore error if not in mode CHK_OPT + return (PlgGetUser(g)->Check & CHK_OPT) != 0; + } // end of GetBlockValues + +/***********************************************************************/ +/* This fonction is used while making XDB2 block optimization. */ +/* It constructs for each elligible columns, the sorted list of the */ +/* distinct values existing in the column. This function uses an */ +/* algorithm that permit to get several sets of distinct values by */ +/* reading the table only once, which cannot be done using a standard */ +/* SQL query. */ +/***********************************************************************/ +bool TDBDOS::GetDistinctColumnValues(PGLOBAL g, int nrec) + { + char *p; + int rc, blk, n = 0; + PDOSCOL colp; + PDBUSER dup = PlgGetUser(g); + + /*********************************************************************/ + /* Initialize progress information */ + /*********************************************************************/ + p = (char *)PlugSubAlloc(g, NULL, 48 + strlen(Name)); + dup->Step = strcat(strcpy(p, MSG(GET_DIST_VALS)), Name); + dup->ProgMax = GetProgMax(g); + dup->ProgCur = 0; + + while ((rc = ReadDB(g)) == RC_OK) { + for (colp = (PDOSCOL)Columns; colp; colp = (PDOSCOL)colp->Next) + if (colp->Clustered == 2) + if (colp->AddDistinctValue(g)) + return true; // Too many distinct values + +#if defined(SOCKET_MODE) + if (SendProgress(dup)) { + strcpy(g->Message, MSG(OPT_CANCELLED)); + return true; + } else +#elif defined(THREAD) + if (!dup->Step) { + strcpy(g->Message, MSG(OPT_CANCELLED)); + return true; + } else +#endif // THREAD + dup->ProgCur = GetProgCur(); + + n++; + } // endwhile + + if (rc != RC_EF) + return true; + + // Reset the number of table blocks +//nrec = ((PDOSDEF)To_Def)->GetElemt(); (or default value) + blk = (n + nrec - 1) / nrec; + Txfp->Block = blk; // Useful mainly for ZLBFAM 
??? + + // Set Nbm, Bmap for XDB2 columns + for (colp = (PDOSCOL)Columns; colp; colp = (PDOSCOL)colp->Next) + if (colp->Clustered == 2) { +// colp->Cdp->SetNdv(colp->Ndv); + colp->Nbm = (colp->Ndv + MAXBMP - 1) / MAXBMP; + colp->Bmap = AllocValBlock(g, NULL, TYPE_INT, colp->Nbm * blk); + } // endif Clustered + + return false; + } // end of GetDistinctColumnValues + +/***********************************************************************/ +/* Analyze the filter and construct the Block Evaluation Filter. */ +/* This is possible when a filter contains predicates implying a */ +/* column marked as "clustered" or "sorted" matched to a constant */ +/* argument. It is then possible by comparison against the smallest */ +/* and largest column values in each block to determine whether the */ +/* filter condition will be always true or always false for the block.*/ +/***********************************************************************/ +PBF TDBDOS::InitBlockFilter(PGLOBAL g, PFIL filp) + { + bool blk = Txfp->Blocked; + + if (To_BlkFil) + return To_BlkFil; // Already done + else if (!filp) + return NULL; + else if (blk) { + if (Txfp->GetAmType() == TYPE_AM_DBF) + /*****************************************************************/ + /* If RowID is used in this query, block optimization cannot be */ + /* used because currently the file must be read sequentially. */ + /*****************************************************************/ + for (PCOL cp = Columns; cp; cp = cp->GetNext()) + if (cp->GetAmType() == TYPE_AM_ROWID && !((RIDBLK*)cp)->GetRnm()) + return NULL; + + } // endif blk + + int i, op = filp->GetOpc(), opm = filp->GetOpm(), n = 0; + bool cnv[2]; + PCOL colp; + PXOB arg[2] = {NULL,NULL}; + PBF *fp = NULL, bfp = NULL; + + switch (op) { + case OP_EQ: + case OP_NE: + case OP_GT: + case OP_GE: + case OP_LT: + case OP_LE: + if (! 
opm) { + for (i = 0; i < 2; i++) { + arg[i] = filp->Arg(i); + cnv[i] = filp->Conv(i); + } // endfor i + + bfp = CheckBlockFilari(g, arg, op, cnv); + break; + } // endif !opm + + // if opm, pass thru + case OP_IN: + if (filp->GetArgType(0) == TYPE_COLBLK && + filp->GetArgType(1) == TYPE_ARRAY) { + arg[0] = filp->Arg(0); + arg[1] = filp->Arg(1); + colp = (PCOL)arg[0]; + + if (colp->GetTo_Tdb() == this) { + // Block evaluation is possible for... + if (colp->GetAmType() == TYPE_AM_ROWID) { + // Special column ROWID and constant array, but + // currently we don't know how to retrieve a RowID + // from a DBF table that is not sequentially read. +// if (Txfp->GetAmType() != TYPE_AM_DBF || +// ((RIDBLK*)arg[0])->GetRnm()) + bfp = new(g) BLKSPCIN(g, this, op, opm, arg, Txfp->Nrec); + + } else if (blk && Txfp->Nrec > 1 && colp->IsClustered()) + // Clustered column and constant array + if (colp->GetClustered() == 2) + bfp = new(g) BLKFILIN2(g, this, op, opm, arg); + else + bfp = new(g) BLKFILIN(g, this, op, opm, arg); + + } // endif this + +#if 0 + } else if (filp->GetArgType(0) == TYPE_SCALF && + filp->GetArgType(1) == TYPE_ARRAY) { + arg[0] = filp->Arg(0); + arg[1] = filp->Arg(1); + + if (((PSCALF)arg[0])->GetOp() == OP_ROW && + arg[1]->GetResultType() == TYPE_LIST) { + PARRAY par = (PARRAY)arg[1]; + LSTVAL *vlp = (LSTVAL*)par->GetValue(); + + ((SFROW*)arg[0])->GetParms(n); + + if (n != vlp->GetN()) + return NULL; + else + n = par->GetNval(); + + arg[1] = new(g) CONSTANT(vlp); + fp = (PBF*)PlugSubAlloc(g, NULL, n * sizeof(PBF)); + cnv[0] = cnv[1] = false; + + if (op == OP_IN) + op = OP_EQ; + + for (i = 0; i < n; i++) { + par->GetNthValue(vlp, i); + + if (!(fp[i] = CheckBlockFilari(g, arg, op, cnv))) + return NULL; + + } // endfor i + + bfp = new(g) BLKFILLOG(this, (opm == 2 ? 
OP_AND : OP_OR), fp, n); + } // endif ROW +#endif // 0 + + } // endif Type + + break; + case OP_AND: + case OP_OR: + fp = (PBF*)PlugSubAlloc(g, NULL, 2 * sizeof(PBF)); + fp[0] = InitBlockFilter(g, (PFIL)(filp->Arg(0))); + fp[1] = InitBlockFilter(g, (PFIL)(filp->Arg(1))); + + if (fp[0] || fp[1]) + bfp = new(g) BLKFILLOG(this, op, fp, 2); + + break; + case OP_NOT: + fp = (PBF*)PlugSubAlloc(g, NULL, sizeof(PBF)); + + if ((*fp = InitBlockFilter(g, (PFIL)(filp->Arg(0))))) + bfp = new(g) BLKFILLOG(this, op, fp, 1); + + break; + case OP_LIKE: + default: + break; + } // endswitch op + + return bfp; + } // end of InitBlockFilter + +/***********************************************************************/ +/* Analyze the passed arguments and construct the Block Filter. */ +/***********************************************************************/ +PBF TDBDOS::CheckBlockFilari(PGLOBAL g, PXOB *arg, int op, bool *cnv) + { +//int i, n1, n2, ctype = TYPE_ERROR, n = 0, type[2] = {0,0}; +//bool conv = false, xdb2 = false, ok = false, b[2]; +//PXOB *xarg1, *xarg2 = NULL, xp[2]; + int i, ctype = TYPE_ERROR, n = 0, type[2] = {0,0}; + bool conv = false, xdb2 = false, ok = false; + PXOB *xarg2 = NULL, xp[2]; + PCOL colp; +//LSTVAL *vlp = NULL; +//SFROW *sfr[2]; + PBF *fp = NULL, bfp = NULL; + + for (i = 0; i < 2; i++) { + switch (arg[i]->GetType()) { + case TYPE_CONST: + type[i] = 1; + ctype = arg[i]->GetResultType(); + break; + case TYPE_COLBLK: + conv = cnv[i]; + colp = (PCOL)arg[i]; + + if (colp->GetTo_Tdb() == this) { + if (colp->GetAmType() == TYPE_AM_ROWID) { + // Currently we don't know how to retrieve a RowID + // from a DBF table that is not sequentially read. 
+// if (Txfp->GetAmType() != TYPE_AM_DBF || +// ((RIDBLK*)arg[i])->GetRnm()) + type[i] = 5; + + } else if (Txfp->Blocked && Txfp->Nrec > 1 && + colp->IsClustered()) { + type[i] = 2; + xdb2 = colp->GetClustered() == 2; + } // endif Clustered + + } else if (colp->GetColUse(U_CORREL)) { + // This is a column pointing to the outer query of a + // correlated subquery, it has a constant value during + // each execution of the subquery. + type[i] = 1; + ctype = arg[i]->GetResultType(); + } // endif this + + break; +// case TYPE_SCALF: +// if (((PSCALF)arg[i])->GetOp() == OP_ROW) { +// sfr[i] = (SFROW*)arg[i]; +// type[i] = 7; +// } // endif Op + +// break; + default: + break; + } // endswitch ArgType + + if (!type[i]) + break; + + n += type[i]; + } // endfor i + + if (n == 3 || n == 6) { + if (conv) { + // The constant has not the good type and will not match + // the block min/max values. Warn and abort. + sprintf(g->Message, "Block opt: %s", MSG(VALTYPE_NOMATCH)); + PushWarning(g, this); + return NULL; + } // endif Conv + + if (type[0] == 1) { + // Make it always as Column-op-Value + *xp = arg[0]; + arg[0] = arg[1]; + arg[1] = *xp; + + switch (op) { + case OP_GT: op = OP_LT; break; + case OP_GE: op = OP_LE; break; + case OP_LT: op = OP_GT; break; + case OP_LE: op = OP_GE; break; + } // endswitch op + + } // endif + +#if defined(_DEBUG) +// assert(arg[0]->GetResultType() == ctype); +#endif + + if (n == 3) { + if (xdb2) { + if (((PDOSCOL)arg[0])->GetNbm() == 1) + bfp = new(g) BLKFILAR2(g, this, op, arg); + else // Multiple bitmap made of several ULONG's + bfp = new(g) BLKFILMR2(g, this, op, arg); + } else + bfp = new(g) BLKFILARI(g, this, op, arg); + + } else // n = 6 + bfp = new(g) BLKSPCARI(this, op, arg, Txfp->Nrec); + +#if 0 + } else if (n == 8 || n == 14) { + if (n == 8 && ctype != TYPE_LIST) { + // Should never happen + strcpy(g->Message, "Block opt: bad constant"); + longjmp(g->jumper[g->jump_level], 99); + } // endif Conv + + if (type[0] == 1) { + // Make it 
always as Column-op-Value + sfr[0] = sfr[1]; + arg[1] = arg[0]; + + switch (op) { + case OP_GT: op = OP_LT; break; + case OP_GE: op = OP_LE; break; + case OP_LT: op = OP_GT; break; + case OP_LE: op = OP_GE; break; + } // endswitch op + + } // endif + + xarg1 = sfr[0]->GetParms(n1); + + if (n == 8) { + vlp = (LSTVAL*)arg[1]->GetValue(); + n2 = vlp->GetN(); + xp[1] = new(g) CONSTANT((PVAL)NULL); + } else + xarg2 = sfr[1]->GetParms(n2); + + if (n1 != n2) + return NULL; // Should we flag an error ? + + fp = (PBF*)PlugSubAlloc(g, NULL, n1 * sizeof(PBF)); + + for (i = 0; i < n1; i++) { + xp[0] = xarg1[i]; + + if (n == 8) + ((CONSTANT*)xp[1])->SetValue(vlp->GetSubVal(i)); + else + xp[1] = xarg2[i]; + + b[0] = b[1] = (xp[0]->GetResultType() != xp[1]->GetResultType()); + ok |= ((fp[i] = CheckBlockFilari(g, xp, op, b)) != NULL); + } // endfor i + + if (ok) + bfp = new(g) BLKFILLOG(this, OP_AND, fp, n1); +#endif // 0 + + } // endif n + + return bfp; + } // end of CheckBlockFilari + +/***********************************************************************/ +/* ResetBlkFil: reset the block filter and restore filtering, or make */ +/* the block filter if To_Filter was not set when opening the table. 
*/ +/***********************************************************************/ +void TDBDOS::ResetBlockFilter(PGLOBAL g) + { + if (!To_BlkFil) { + if (To_Filter) + if ((To_BlkFil = InitBlockFilter(g, To_Filter))) { + htrc("BlkFil=%p\n", To_BlkFil); + MaxSize = -1; // To be recalculated + } // endif To_BlkFil + + return; + } // endif To_BlkFil + + To_BlkFil->Reset(g); + + if (SavFil && !To_Filter) { + // Restore filter if it was disabled by optimization + To_Filter = SavFil; + SavFil = NULL; + } // endif + + Beval = 0; + } // end of ResetBlockFilter + +/***********************************************************************/ +/* Block optimization: evaluate the block index filter against */ +/* the min and max values of this block and return: */ +/* RC_OK: if some records in the block can meet filter criteria. */ +/* RC_NF: if no record in the block can meet filter criteria. */ +/* RC_EF: if no record in the remaining file can meet filter criteria.*/ +/* In addition, temporarily supress filtering if all the records in */ +/* the block meet filter criteria. */ +/***********************************************************************/ +int TDBDOS::TestBlock(PGLOBAL g) + { + int rc = RC_OK; + + if (To_BlkFil && Beval != 2) { + // Check for block filtering evaluation + if (Beval == 1) { + // Filter was removed for last block, restore it + To_Filter = SavFil; + SavFil = NULL; + } // endif Beval + + // Check for valid records in new block + switch (Beval = To_BlkFil->BlockEval(g)) { + case -2: // No more valid values in file + rc = RC_EF; + break; + case -1: // No valid values in block + rc = RC_NF; + break; + case 1: // All block values are valid + case 2: // All subsequent file values are Ok + // Before suppressing the filter for the block(s) it is + // necessary to reset the filtered columns to NOT_READ + // so their new values are retrieved by the SELECT list. 
+ if (To_Filter) // Can be NULL when externally called (XDB) + To_Filter->Reset(); + + SavFil = To_Filter; + To_Filter = NULL; // So remove filter + } // endswitch Beval + + if (trace) + htrc("BF Eval Beval=%d\n", Beval); + + } // endif To_BlkFil + + return rc; + } // end of TestBlock + +/***********************************************************************/ /* Check whether we have to create/update permanent indexes. */ /***********************************************************************/ int TDBDOS::MakeIndex(PGLOBAL g, PIXDEF pxdf, bool add) @@ -454,7 +1585,13 @@ int TDBDOS::MakeIndex(PGLOBAL g, PIXDEF pxdf, bool add) Mode = MODE_READ; Use = USE_READY; dfp = (PDOSDEF)To_Def; - fixed = Cardinality(g) >= 0; + + if (!Cardinality(g)) { + // Void table erase eventual index file(s) + (void)dfp->DeleteIndexFile(g, NULL); + return RC_OK; + } else + fixed = Ftype != RECFM_VAR; // Are we are called from CreateTable or CreateIndex? if (pxdf) { @@ -482,7 +1619,7 @@ int TDBDOS::MakeIndex(PGLOBAL g, PIXDEF pxdf, bool add) // Allocate all columns that will be used by indexes. // This must be done before opening the table so specific - // column initialization can be done ( in particular by TDBVCT) + // column initialization can be done (in particular by TDBVCT) for (n = 0, xdp = pxdf; xdp; xdp = xdp->GetNext()) for (kdp = xdp->GetToKeyParts(); kdp; kdp = kdp->GetNext()) { if (!(colp = ColDB(g, kdp->GetName(), 0))) { @@ -580,6 +1717,107 @@ err: } // end of MakeIndex /***********************************************************************/ +/* Make a dynamic index. 
*/ +/***********************************************************************/ +bool TDBDOS::InitialyzeIndex(PGLOBAL g, PIXDEF xdp) + { + int k, rc; + bool brc, dynamic; + PCOL colp; + PCOLDEF cdp; + PVAL valp; + PXLOAD pxp; + PKXBASE kxp; + PKPDEF kdp; + + if (!xdp && !(xdp = To_Xdp)) { + strcpy(g->Message, "NULL dynamic index"); + return true; + } else + dynamic = To_Filter && xdp->IsUnique() && xdp->IsDynamic(); +// dynamic = To_Filter && xdp->IsDynamic(); NIY + + // Allocate the key columns definition block + Knum = xdp->GetNparts(); + To_Key_Col = (PCOL*)PlugSubAlloc(g, NULL, Knum * sizeof(PCOL)); + + // Get the key column description list + for (k = 0, kdp = xdp->GetToKeyParts(); kdp; kdp = kdp->GetNext()) + if (!(colp = ColDB(g, kdp->GetName(), 0)) || colp->InitValue(g)) { + sprintf(g->Message, "Wrong column %s", kdp->GetName()); + return true; + } else + To_Key_Col[k++] = colp; + +#if defined(_DEBUG) + if (k != Knum) { + sprintf(g->Message, "Key part number mismatch for %s", + xdp->GetName()); + return 0; + } // endif k +#endif // _DEBUG + + // Allocate the pseudo constants that will contain the key values + To_Link = (PXOB*)PlugSubAlloc(g, NULL, Knum * sizeof(PXOB)); + + for (k = 0, kdp = xdp->GetToKeyParts(); kdp; k++, kdp = kdp->GetNext()) { + cdp = Key(k)->GetCdp(); + valp = AllocateValue(g, cdp->GetType(), cdp->GetLength()); + To_Link[k]= new(g) CONSTANT(valp); + } // endfor k + + // Make the index on xdp + if (!xdp->IsAuto()) { + if (!dynamic) { + if (((PDOSDEF)To_Def)->Huge) + pxp = new(g) XHUGE; + else + pxp = new(g) XFILE; + + } else + pxp = NULL; + + if (Knum == 1) // Single index + kxp = new(g) XINDXS(this, xdp, pxp, To_Key_Col, To_Link); + else // Multi-Column index + kxp = new(g) XINDEX(this, xdp, pxp, To_Key_Col, To_Link); + + } else // Column contains same values as ROWID + kxp = new(g) XXROW(this); + + // Prepare error return + if (g->jump_level == MAX_JUMP) { + strcpy(g->Message, MSG(TOO_MANY_JUMPS)); + return true; + } // endif + + if (!(rc 
= setjmp(g->jumper[++g->jump_level])) != 0) { + if (dynamic) { + ResetBlockFilter(g); + kxp->SetDynamic(dynamic); + brc = kxp->Make(g, xdp); + } else + brc = kxp->Init(g); + + if (!brc) { + if (Txfp->GetAmType() == TYPE_AM_BLK) { + // Cannot use indexing in DOS block mode + Txfp = new(g) DOSFAM((PBLKFAM)Txfp, (PDOSDEF)To_Def); + Txfp->AllocateBuffer(g); + To_BlkFil = NULL; + } // endif AmType + + To_Kindex= kxp; + } // endif brc + + } else + brc = true; + + g->jump_level--; + return brc; + } // end of InitialyzeIndex + +/***********************************************************************/ /* DOS GetProgMax: get the max value for progress information. */ /***********************************************************************/ int TDBDOS::GetProgMax(PGLOBAL g) @@ -619,11 +1857,82 @@ int TDBDOS::RowNumber(PGLOBAL g, bool b) /***********************************************************************/ int TDBDOS::Cardinality(PGLOBAL g) { + int n = Txfp->Cardinality(NULL); + if (!g) - return Txfp->Cardinality(g); + return (Mode == MODE_ANY) ? 
1 : n; + + if (Cardinal < 0) { + if (!Txfp->Blocked && n == 0) { + // Info command, we try to return exact row number + PDOSDEF dfp = (PDOSDEF)To_Def; + PIXDEF xdp = dfp->To_Indx; + + if (xdp && xdp->IsValid()) { + // Cardinality can be retreived from one index + PXLOAD pxp; + + if (dfp->Huge) + pxp = new(g) XHUGE; + else + pxp = new(g) XFILE; + + PXINDEX kxp = new(g) XINDEX(this, xdp, pxp, NULL, NULL); + + if (!(kxp->GetAllSizes(g, Cardinal))) + return Cardinal; + + } // endif Mode + + if (Mode == MODE_ANY) { + // Using index impossible or failed, do it the hard way + Mode = MODE_READ; + To_Line = (char*)PlugSubAlloc(g, NULL, Lrecl + 1); + + if (Txfp->OpenTableFile(g)) + return (Cardinal = Txfp->Cardinality(g)); + + for (Cardinal = 0; n != RC_EF;) + if (!(n = Txfp->ReadBuffer(g))) + Cardinal++; + + Txfp->CloseTableFile(g, false); + Mode = MODE_ANY; + } else { + // Return the best estimate + int len = GetFileLength(g); + + if (len >= 0) { + int rec; + + if (trace) + htrc("Estimating lines len=%d ending=%d/n", + len, ((PDOSDEF)To_Def)->Ending); + + /*************************************************************/ + /* Estimate the number of lines in the table (if not known) */ + /* by dividing the file length by the average record length. 
*/ + /*************************************************************/ + rec = ((PDOSDEF)To_Def)->Ending; + + if (AvgLen <= 0) // No given average estimate + rec += EstimatedLength(g); + else // An estimate was given for the average record length + rec += AvgLen; + + Cardinal = (len + rec - 1) / rec; + + if (trace) + htrc("avglen=%d MaxSize%d\n", rec, Cardinal); + + } // endif len - if (Cardinal < 0) - Cardinal = Txfp->Cardinality(g); + } // endif Mode + + } else + Cardinal = Txfp->Cardinality(g); + + } // endif Cardinal return Cardinal; } // end of Cardinality @@ -641,30 +1950,21 @@ int TDBDOS::GetMaxSize(PGLOBAL g) int len = GetFileLength(g); if (len >= 0) { + int rec; + if (trace) - htrc("Estimating lines len=%d ending=%d\n", + htrc("Estimating lines len=%d ending=%d/n", len, ((PDOSDEF)To_Def)->Ending); /*****************************************************************/ /* Estimate the number of lines in the table (if not known) by */ - /* dividing the file length by the minimum line length assuming */ - /* only the last column can be of variable length. This will be */ - /* a ceiling estimate (as last column is never totally absent). */ + /* dividing the file length by minimum record length. 
*/ /*****************************************************************/ - int rec = ((PDOSDEF)To_Def)->Ending; // +2: CRLF +1: LF - - if (AvgLen <= 0) // No given average estimate - rec += EstimatedLength(g); - else // A lower estimate was given for the average record length - rec += (int)AvgLen; - - if (trace) - htrc(" Filen=%d min_rec=%d\n", len, rec); - + rec = EstimatedLength(g) + ((PDOSDEF)To_Def)->Ending; MaxSize = (len + rec - 1) / rec; if (trace) - htrc(" Estimated max_K=%d\n", MaxSize); + htrc("avglen=%d MaxSize%d\n", rec, MaxSize); } // endif len @@ -723,7 +2023,7 @@ bool TDBDOS::OpenDB(PGLOBAL g) Txfp->Rewind(); // see comment in Work.log if (SkipHeader(g)) - return TRUE; + return true; } else /*****************************************************************/ @@ -731,6 +2031,7 @@ bool TDBDOS::OpenDB(PGLOBAL g) /*****************************************************************/ To_Kindex->Reset(); + ResetBlockFilter(g); return false; } // endif use @@ -750,7 +2051,7 @@ bool TDBDOS::OpenDB(PGLOBAL g) else if (Txfp->GetAmType() == TYPE_AM_ZIP) Txfp = new(g) ZIPFAM((PDOSDEF)To_Def); #endif // ZIP_SUPPORT - else if (Txfp->GetAmType() != TYPE_AM_DOS) + else // if (Txfp->GetAmType() != TYPE_AM_DOS) ??? Txfp = new(g) DOSFAM((PDOSDEF)To_Def); Txfp->SetTdbp(this); @@ -767,6 +2068,11 @@ bool TDBDOS::OpenDB(PGLOBAL g) Use = USE_OPEN; // Do it now in case we are recursively called /*********************************************************************/ + /* Allocate the block filter tree if evaluation is possible. */ + /*********************************************************************/ + To_BlkFil = InitBlockFilter(g, To_Filter); + + /*********************************************************************/ /* Allocate the line buffer plus a null character. 
*/ /*********************************************************************/ To_Line = (char*)PlugSubAlloc(g, NULL, Lrecl + 1); @@ -894,7 +2200,7 @@ void TDBDOS::CloseDB(PGLOBAL g) To_Kindex = NULL; } // endif - Txfp->CloseTableFile(g); + Txfp->CloseTableFile(g, Abort); } // end of CloseDB // ------------------------ DOSCOL functions ---------------------------- @@ -923,6 +2229,40 @@ DOSCOL::DOSCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PSZ am) Deplac = cdp->GetOffset(); Long = cdp->GetLong(); To_Val = NULL; + Clustered = 0; + Sorted = 0; + Ndv = 0; // Currently used only for XDB2 + Nbm = 0; // Currently used only for XDB2 + Min = NULL; + Max = NULL; + Bmap = NULL; + Dval = NULL; + Buf = NULL; + + if (txfp->Blocked && Opt && (cdp->GetMin() || cdp->GetDval())) { + int nblk = txfp->GetBlock(); + + Clustered = (cdp->GetXdb2()) ? 2 : 1; + Sorted = (cdp->GetOpt() > 1) ? 1 : 0; // Currently ascending only + + if (Clustered == 1) { + Min = AllocValBlock(g, cdp->GetMin(), Buf_Type, nblk, Long, prec); + Max = AllocValBlock(g, cdp->GetMax(), Buf_Type, nblk, Long, prec); + } else { // Clustered == 2 + // Ndv is the number of distinct values in Dval. Ndv and Nbm + // may be 0 when optimizing because Ndval is not filled yet, + // but the size of the passed Dval memory block is Ok. 
+ Ndv = cdp->GetNdv(); + Dval = AllocValBlock(g, cdp->GetDval(), Buf_Type, Ndv, Long, prec); + + // Bmap cannot be allocated when optimizing, we must know Nbm first + if ((Nbm = cdp->GetNbm())) + Bmap = AllocValBlock(g, cdp->GetBmap(), TYPE_INT, Nbm * nblk); + + } // endif Clustered + + } // endif Opt + OldVal = NULL; // Currently used only in MinMax Dsp = 0; Ldz = false; @@ -970,9 +2310,39 @@ DOSCOL::DOSCOL(DOSCOL *col1, PTDB tdbp) : COLBLK(col1, tdbp) Dcm = col1->Dcm; OldVal = col1->OldVal; Buf = col1->Buf; + Clustered = col1->Clustered; + Sorted = col1->Sorted; + Min = col1->Min; + Max = col1->Max; + Bmap = col1->Bmap; + Dval = col1->Dval; + Ndv = col1->Ndv; + Nbm = col1->Nbm; } // end of DOSCOL copy constructor /***********************************************************************/ +/* VarSize: This function tells UpdateDB whether or not the block */ +/* optimization file must be redone if this column is updated, even */ +/* it is not sorted or clustered. This applies to the last column of */ +/* a variable length table that is blocked, because if it is updated */ +/* using a temporary file, the block size may be modified. */ +/***********************************************************************/ +bool DOSCOL::VarSize(void) + { + PTDBDOS tdbp = (PTDBDOS)To_Tdb; + PTXF txfp = tdbp->Txfp; + + if (Cdp && !Cdp->GetNext() // Must be the last column + && tdbp->Ftype == RECFM_VAR // of a DOS variable length + && txfp->Blocked // blocked table + && txfp->GetUseTemp()) // using a temporary file. + return true; + else + return false; + + } // end VarSize + +/***********************************************************************/ /* SetBuffer: prepare a column block for write operation. 
*/ /***********************************************************************/ bool DOSCOL::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check) @@ -1260,6 +2630,144 @@ void DOSCOL::WriteColumn(PGLOBAL g) } // end of WriteColumn /***********************************************************************/ +/* SetMinMax: Calculate minimum and maximum values for one block. */ +/* Note: TYPE_STRING is stored and processed with zero ended strings */ +/* to be matching the way the FILTER Eval function processes them. */ +/***********************************************************************/ +bool DOSCOL::SetMinMax(PGLOBAL g) + { + PTDBDOS tp = (PTDBDOS)To_Tdb; + + ReadColumn(g); // Extract column value from current line + + if (CheckSorted(g)) + return true; + + if (!tp->Txfp->CurNum) { + Min->SetValue(Value, tp->Txfp->CurBlk); + Max->SetValue(Value, tp->Txfp->CurBlk); + } else { + Min->SetMin(Value, tp->Txfp->CurBlk); + Max->SetMax(Value, tp->Txfp->CurBlk); + } // endif CurNum + + return false; + } // end of SetMinMax + +/***********************************************************************/ +/* SetBitMap: Calculate the bit map of existing values in one block. */ +/* Note: TYPE_STRING is processed with zero ended strings */ +/* to be matching the way the FILTER Eval function processes them. 
*/ +/***********************************************************************/ +bool DOSCOL::SetBitMap(PGLOBAL g) + { + int i, m, n; + uint *bmp; + PTDBDOS tp = (PTDBDOS)To_Tdb; + PDBUSER dup = PlgGetUser(g); + + n = tp->Txfp->CurNum; + bmp = (uint*)Bmap->GetValPtr(Nbm * tp->Txfp->CurBlk); + + // Extract column value from current line + ReadColumn(g); + + if (CheckSorted(g)) + return true; + + if (!n) // New block + for (m = 0; m < Nbm; m++) + bmp[m] = 0; // Reset the new bit map + + if ((i = Dval->Find(Value)) < 0) { + char buf[32]; + + sprintf(g->Message, MSG(DVAL_NOTIN_LIST), + Value->GetCharString(buf), Name); + return true; + } else if (i >= dup->Maxbmp) { + sprintf(g->Message, MSG(OPT_LOGIC_ERR), i); + return true; + } else { + m = i / MAXBMP; +#if defined(_DEBUG) + assert (m < Nbm); +#endif // _DEBUG + bmp[m] |= (1 << (i % MAXBMP)); + } // endif's i + + return false; + } // end of SetBitMap + +/***********************************************************************/ +/* Checks whether a column declared as sorted is sorted indeed. */ +/***********************************************************************/ +bool DOSCOL::CheckSorted(PGLOBAL g) + { + if (Sorted) + if (OldVal) { + // Verify whether this column is sorted all right + if (OldVal->CompareValue(Value) > 0) { + // Column is no more in ascending order + sprintf(g->Message, MSG(COL_NOT_SORTED), Name, To_Tdb->GetName()); + Sorted = false; + return true; + } else + OldVal->SetValue_pval(Value); + + } else + OldVal = AllocateValue(g, Value); + + return false; + } // end of CheckSorted + +/***********************************************************************/ +/* AddDistinctValue: Check whether this value already exist in the */ +/* list and if not add it to the distinct values list. 
*/ +/***********************************************************************/ +bool DOSCOL::AddDistinctValue(PGLOBAL g) + { + bool found = false; + int i, m, n; + + ReadColumn(g); // Extract column value from current line + + // Perhaps a better algorithm can be used when Ndv gets bigger + // Here we cannot use Find because we must get the index of where + // to insert a new value if it is not found in the array. + for (n = 0; n < Ndv; n++) { + m = Dval->CompVal(Value, n); + + if (m > 0) + continue; + else if (!m) + found = true; // Already there + + break; + } // endfor n + + if (!found) { + // Check whether we have room for an additional value + if (Ndv == Freq) { + // Too many values because of wrong Freq setting + sprintf(g->Message, MSG(BAD_FREQ_SET), Name); + return true; + } // endif Ndv + + // New value, add it to the list before the nth value + Dval->SetNval(Ndv + 1); + + for (i = Ndv; i > n; i--) + Dval->Move(i - 1, i); + + Dval->SetValue(Value, n); + Ndv++; + } // endif found + + return false; + } // end of AddDistinctValue + +/***********************************************************************/ /* Make file output of a Dos column descriptor block. */ /***********************************************************************/ void DOSCOL::Print(PGLOBAL g, FILE *f, uint n) diff --git a/storage/connect/tabdos.h b/storage/connect/tabdos.h index 79a2659fb70..7f2c4f5e7ee 100644 --- a/storage/connect/tabdos.h +++ b/storage/connect/tabdos.h @@ -12,9 +12,12 @@ #include "xtable.h" // Table base class declares #include "colblk.h" // Column base class declares #include "xindex.h" +#include "filter.h" //pedef struct _tabdesc *PTABD; // For friend setting typedef class TXTFAM *PTXF; +typedef class BLOCKFILTER *PBF; +typedef class BLOCKINDEX *PBX; /***********************************************************************/ /* DOS table. 
*/ @@ -47,6 +50,11 @@ class DllExport DOSDEF : public TABDEF { /* Logical table description */ bool GetEof(void) {return Eof;} int GetBlksize(void) {return Blksize;} int GetEnding(void) {return Ending;} + bool IsOptimized(void) {return (Optimized == 1);} + void SetOptimized(int opt) {Optimized = opt;} + void SetAllocBlks(int blks) {AllocBlks = blks;} + int GetAllocBlks(void) {return AllocBlks;} + int *GetTo_Pos(void) {return To_Pos;} // Methods virtual int Indexable(void) @@ -55,6 +63,8 @@ class DllExport DOSDEF : public TABDEF { /* Logical table description */ virtual bool DefineAM(PGLOBAL g, LPCSTR am, int poff); virtual PTDB GetTable(PGLOBAL g, MODE mode); bool InvalidateIndex(PGLOBAL g); + bool GetOptFileName(PGLOBAL g, char *filename); + void RemoveOptValues(PGLOBAL g); protected: //virtual bool Erase(char *filename); @@ -69,6 +79,9 @@ class DllExport DOSDEF : public TABDEF { /* Logical table description */ bool Huge; /* true for files larger than 2GB */ bool Accept; /* true if wrong lines are accepted (DBF)*/ bool Eof; /* true if an EOF (0xA) character exists */ + int *To_Pos; /* To array of block starting positions */ + int Optimized; /* 0: No, 1:Yes, 2:Redo optimization */ + int AllocBlks; /* Number of suballocated opt blocks */ int Compressed; /* 0: No, 1: gz, 2:zlib compressed file */ int Lrecl; /* Size of biggest record */ int AvgLen; /* Average size of records */ @@ -112,6 +125,7 @@ class DllExport TDBDOS : public TDBASE { virtual AMT GetAmType(void) {return Txfp->GetAmType();} virtual PSZ GetFile(PGLOBAL g) {return Txfp->To_File;} virtual void SetFile(PGLOBAL g, PSZ fn) {Txfp->To_File = fn;} + virtual void SetAbort(bool b) {Abort = b;} virtual RECFM GetFtype(void) {return Ftype;} virtual bool SkipHeader(PGLOBAL g) {return false;} virtual void RestoreNrec(void) {Txfp->SetNrec(1);} @@ -123,7 +137,13 @@ class DllExport TDBDOS : public TDBASE { virtual void ResetDB(void) {Txfp->Reset();} virtual bool IsUsingTemp(PGLOBAL g); virtual void ResetSize(void) 
{MaxSize = Cardinal = -1;} - virtual int ResetTableOpt(PGLOBAL g, bool dox); + virtual int ResetTableOpt(PGLOBAL g, bool dop, bool dox); + virtual int MakeBlockValues(PGLOBAL g); + virtual bool SaveBlockValues(PGLOBAL g); + virtual bool GetBlockValues(PGLOBAL g); + virtual PBF InitBlockFilter(PGLOBAL g, PFIL filp); +//virtual PBX InitBlockIndex(PGLOBAL g); + virtual int TestBlock(PGLOBAL g); virtual void PrintAM(FILE *f, char *m); // Database routines @@ -132,7 +152,7 @@ class DllExport TDBDOS : public TDBASE { virtual int GetFileLength(PGLOBAL g) {return Txfp->GetFileLength(g);} virtual int GetProgMax(PGLOBAL g); virtual int GetProgCur(void); - virtual int GetAffectedRows(void) {return Txfp->GetDelRows();} +//virtual int GetAffectedRows(void) {return Txfp->GetDelRows();} virtual int GetRecpos(void) {return Txfp->GetPos();} virtual bool SetRecpos(PGLOBAL g, int recpos) {return Txfp->SetPos(g, recpos);} @@ -151,15 +171,24 @@ class DllExport TDBDOS : public TDBASE { // Optimization routines virtual int MakeIndex(PGLOBAL g, PIXDEF pxdf, bool add); + bool InitialyzeIndex(PGLOBAL g, PIXDEF xdp); + void ResetBlockFilter(PGLOBAL g); + bool GetDistinctColumnValues(PGLOBAL g, int nrec); protected: + PBF CheckBlockFilari(PGLOBAL g, PXOB *arg, int op, bool *cnv); + // Members PTXF Txfp; // To the File access method class +//PBX To_BlkIdx; // To index test block + PBF To_BlkFil; // To evaluation block filter + PFIL SavFil; // Saved hidden filter char *To_Line; // Points to current processed line - int Cardinal; // Table Cardinality - RECFM Ftype; // File type: 0-var 1-fixed 2-binary (VCT) + bool Abort; // TRUE when aborting UPDATE/DELETE int Lrecl; // Logical Record Length int AvgLen; // Logical Record Average Length +//int Xeval; // BlockTest return value + int Beval; // BlockEval return value }; // end of class TDBDOS /***********************************************************************/ @@ -178,19 +207,38 @@ class DllExport DOSCOL : public COLBLK { // Implementation 
virtual int GetAmType(void) {return TYPE_AM_DOS;} virtual void SetTo_Val(PVAL valp) {To_Val = valp;} + virtual int GetClustered(void) {return Clustered;} + virtual int IsClustered(void) {return (Clustered && + ((PDOSDEF)(((PTDBDOS)To_Tdb)->To_Def))->IsOptimized());} + virtual int IsSorted(void) {return Sorted;} + virtual PVBLK GetMin(void) {return Min;} + virtual PVBLK GetMax(void) {return Max;} + virtual int GetNdv(void) {return Ndv;} + virtual int GetNbm(void) {return Nbm;} + virtual PVBLK GetBmap(void) {return Bmap;} + virtual PVBLK GetDval(void) {return Dval;} // Methods + virtual bool VarSize(void); virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check); virtual void ReadColumn(PGLOBAL g); virtual void WriteColumn(PGLOBAL g); virtual void Print(PGLOBAL g, FILE *, uint); protected: + virtual bool SetMinMax(PGLOBAL g); + virtual bool SetBitMap(PGLOBAL g); + bool CheckSorted(PGLOBAL g); + bool AddDistinctValue(PGLOBAL g); // Default constructor not to be used DOSCOL(void) {} // Members + PVBLK Min; // Array of block min values + PVBLK Max; // Array of block max values + PVBLK Bmap; // Array of block bitmap values + PVBLK Dval; // Array of column distinct values PVAL To_Val; // To value used for Update/Insert PVAL OldVal; // The previous value of the object. 
char *Buf; // Buffer used in read/write operations @@ -199,6 +247,10 @@ class DllExport DOSCOL : public COLBLK { bool Nod; // True if no decimal point int Dcm; // Last Dcm digits are decimals int Deplac; // Offset in dos_buf + int Clustered; // 0:No 1:Yes + int Sorted; // 0:No 1:Asc (2:Desc - NIY) + int Ndv; // Number of distinct values + int Nbm; // Number of uint in bitmap }; // end of class DOSCOL #endif // __TABDOS_H diff --git a/storage/connect/tabfix.cpp b/storage/connect/tabfix.cpp index cda08b7e3a6..fe04fe52627 100644 --- a/storage/connect/tabfix.cpp +++ b/storage/connect/tabfix.cpp @@ -45,6 +45,8 @@ #include "filamfix.h" #include "filamdbf.h" #include "tabfix.h" // TDBFIX, FIXCOL classes declares +#include "array.h" +#include "blkfil.h" /***********************************************************************/ /* DB static variables. */ @@ -61,12 +63,10 @@ static const longlong M4G = (longlong)2 * M2G; /***********************************************************************/ TDBFIX::TDBFIX(PDOSDEF tdp, PTXF txfp) : TDBDOS(tdp, txfp) { -//Cardinal = -1; } // end of TDBFIX standard constructor TDBFIX::TDBFIX(PGLOBAL g, PTDBFIX tdbp) : TDBDOS(g, tdbp) { -//Cardinal = tdbp->Cardinal; } // end of TDBFIX copy constructor // Method @@ -123,10 +123,48 @@ PCOL TDBFIX::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) /***********************************************************************/ /* Remake the indexes after the table was modified. 
*/ /***********************************************************************/ -int TDBFIX::ResetTableOpt(PGLOBAL g, bool dox) +int TDBFIX::ResetTableOpt(PGLOBAL g, bool dop, bool dox) { + int prc, rc = RC_OK; + + To_Filter = NULL; // Disable filtering +//To_BlkIdx = NULL; // and block filtering + To_BlkFil = NULL; // and index filtering + Cardinality(g); // If called by create RestoreNrec(); // May have been modified - return TDBDOS::ResetTableOpt(g, dox); + MaxSize = -1; // Size must be recalculated + Cardinal = -1; // as well as Cardinality + + if (dop) { + Columns = NULL; // Not used anymore + Txfp->Reset(); +// OldBlk = CurBlk = -1; +// ReadBlks = CurNum = Rbuf = Modif = 0; + Use = USE_READY; // So the table can be reopened + Mode = MODE_ANY; // Just to be clean + rc = MakeBlockValues(g); // Redo optimization + } // endif dop + + if (dox && (rc == RC_OK || rc == RC_INFO)) { + // Remake eventual indexes + Columns = NULL; // Not used anymore + Txfp->Reset(); // New start + Use = USE_READY; // So the table can be reopened + Mode = MODE_READ; // New mode + prc = rc; + + if (!(PlgGetUser(g)->Check & CHK_OPT)) { + // After the table was modified the indexes + // are invalid and we should mark them as such... + rc = ((PDOSDEF)To_Def)->InvalidateIndex(g); + } else + // ... or we should remake them. + rc = MakeIndex(g, NULL, FALSE); + + rc = (rc == RC_INFO) ? prc : rc; + } // endif dox + + return rc; } // end of ResetTableOpt /***********************************************************************/ @@ -138,6 +176,11 @@ void TDBFIX::RestoreNrec(void) Txfp->Nrec = (To_Def && To_Def->GetElemt()) ? To_Def->GetElemt() : DOS_BUFF_LEN; Txfp->Blksize = Txfp->Nrec * Txfp->Lrecl; + + if (Cardinal >= 0) + Txfp->Block = (Cardinal > 0) + ? 
(Cardinal + Txfp->Nrec - 1) / Txfp->Nrec : 0; + } // endif Padded } // end of RestoreNrec @@ -163,9 +206,18 @@ int TDBFIX::Cardinality(PGLOBAL g) /***********************************************************************/ int TDBFIX::GetMaxSize(PGLOBAL g) { - if (MaxSize < 0) + if (MaxSize < 0) { MaxSize = Cardinality(g); + if (MaxSize > 0 && (To_BlkFil = InitBlockFilter(g, To_Filter)) + && !To_BlkFil->Correlated()) { + // Use BlockTest to reduce the estimated size + MaxSize = Txfp->MaxBlkSize(g, MaxSize); + ResetBlockFilter(g); + } // endif To_BlkFil + + } // endif MaxSize + return MaxSize; } // end of GetMaxSize @@ -246,6 +298,7 @@ bool TDBFIX::OpenDB(PGLOBAL g) else Txfp->Rewind(); // see comment in Work.log + ResetBlockFilter(g); return false; } // endif use @@ -277,8 +330,13 @@ bool TDBFIX::OpenDB(PGLOBAL g) /*********************************************************************/ To_Line = Txfp->GetBuf(); // For WriteDB + /*********************************************************************/ + /* Allocate the block filter tree if evaluation is possible. */ + /*********************************************************************/ + To_BlkFil = InitBlockFilter(g, To_Filter); + if (trace) - htrc("OpenDos: R%hd mode=%d\n", Tdb_No, Mode); + htrc("OpenFix: R%hd mode=%d BlkFil=%p\n", Tdb_No, Mode, To_BlkFil); /*********************************************************************/ /* Reset buffer access according to indexing and to mode. 
*/ diff --git a/storage/connect/tabfix.h b/storage/connect/tabfix.h index bcd171b37bb..5feb3589928 100644 --- a/storage/connect/tabfix.h +++ b/storage/connect/tabfix.h @@ -38,7 +38,7 @@ class DllExport TDBFIX : public TDBDOS { virtual void ResetDB(void); virtual bool IsUsingTemp(PGLOBAL g); virtual int RowNumber(PGLOBAL g, bool b = false); - virtual int ResetTableOpt(PGLOBAL g, bool dox); + virtual int ResetTableOpt(PGLOBAL g, bool dop, bool dox); virtual void ResetSize(void); virtual int GetBadLines(void) {return Txfp->GetNerr();} diff --git a/storage/connect/tabfmt.cpp b/storage/connect/tabfmt.cpp index 7665395167d..c015b6adad3 100644 --- a/storage/connect/tabfmt.cpp +++ b/storage/connect/tabfmt.cpp @@ -1,11 +1,11 @@ /************* TabFmt C++ Program Source Code File (.CPP) **************/ /* PROGRAM NAME: TABFMT */ /* ------------- */ -/* Version 3.8 */ +/* Version 3.9 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 2001 - 2013 */ +/* (C) Copyright to the author Olivier BERTRAND 2001 - 2014 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -16,7 +16,7 @@ /***********************************************************************/ /***********************************************************************/ -/* Include relevant MariaDB header file. */ +/* Include relevant MariaDB header file. 
*/ /***********************************************************************/ #include "my_global.h" @@ -145,7 +145,7 @@ PQRYRES CSVColumns(PGLOBAL g, const char *fn, char sep, char q, n = strlen(buf) + 1; buf[n - 2] = '\0'; #if defined(UNIX) - // The file can be imported from Windows + // The file can be imported from Windows if (buf[n - 3] == '\r') buf[n - 3] = 0; #endif // UNIX @@ -202,7 +202,7 @@ PQRYRES CSVColumns(PGLOBAL g, const char *fn, char sep, char q, n = strlen(buf); buf[n - 1] = '\0'; #if defined(UNIX) - // The file can be imported from Windows + // The file can be imported from Windows if (buf[n - 2] == '\r') buf[n - 2] = 0; #endif // UNIX @@ -392,7 +392,7 @@ CSVDEF::CSVDEF(void) Fmtd = Accept = Header = false; Maxerr = 0; Quoted = -1; - Sep = ','; + Sep = ','; Qot = '\0'; } // end of CSVDEF constructor @@ -458,10 +458,9 @@ PTDB CSVDEF::GetTable(PGLOBAL g, MODE mode) #if defined(ZIP_SUPPORT) if (Compressed == 1) txfp = new(g) ZIPFAM(this); - else { - strcpy(g->Message, "Compress 2 not supported yet"); - return NULL; - } // endelse + else + txfp = new(g) ZLBFAM(this); + #else // !ZIP_SUPPORT strcpy(g->Message, "Compress not supported"); return NULL; @@ -591,22 +590,17 @@ bool TDBCSV::CheckErr(void) /***********************************************************************/ int TDBCSV::EstimatedLength(PGLOBAL g) { + int n = 0; + PCOLDEF cdp; + if (trace) htrc("EstimatedLength: Fields=%d Columns=%p\n", Fields, Columns); - - if (!Fields) { - PCSVCOL colp; - for (colp = (PCSVCOL)Columns; colp; colp = (PCSVCOL)colp->Next) - if (!colp->IsSpecial() && !colp->IsVirtual()) // A true column - Fields = MY_MAX(Fields, (int)colp->Fldnum); - - if (Columns) - Fields++; // Fldnum was 0 based + for (cdp = To_Def->GetCols(); cdp; cdp = cdp->GetNext()) + if (!cdp->IsSpecial() && !cdp->IsVirtual()) // A true column + n++; - } // endif Fields - - return (int)Fields; // Number of separators if all fields are null + return --n; // Number of separators if all fields are null } 
// end of Estimated Length #if 0 @@ -649,7 +643,7 @@ bool TDBCSV::OpenDB(PGLOBAL g) } else for (cdp = tdp->GetCols(); cdp; cdp = cdp->GetNext()) - if (!cdp->IsVirtual()) + if (!cdp->IsSpecial() && !cdp->IsVirtual()) Fields++; Offset = (int*)PlugSubAlloc(g, NULL, sizeof(int) * Fields); @@ -686,7 +680,7 @@ bool TDBCSV::OpenDB(PGLOBAL g) } else // MODE_UPDATE for (cdp = tdp->GetCols(); cdp; cdp = cdp->GetNext()) - if (!cdp->IsVirtual()) { + if (!cdp->IsSpecial() && !cdp->IsVirtual()) { i = cdp->GetOffset() - 1; len = cdp->GetLength(); Field[i] = (PSZ)PlugSubAlloc(g, NULL, len + 1); @@ -1080,7 +1074,7 @@ PCOL TDBFMT::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) int TDBFMT::EstimatedLength(PGLOBAL g) { // This is rather stupid !!! - return ((PDOSDEF)To_Def)->GetEnding() + (int)((Lrecl / 10) + 1); + return ((PDOSDEF)To_Def)->GetEnding() + (int)((Lrecl / 10) + 1); } // end of EstimatedLength /***********************************************************************/ @@ -1118,7 +1112,8 @@ bool TDBFMT::OpenDB(PGLOBAL g) // Get the column formats for (cdp = tdp->GetCols(); cdp; cdp = cdp->GetNext()) - if (!cdp->IsVirtual() && (i = cdp->GetOffset() - 1) < Fields) { + if (!cdp->IsSpecial() && !cdp->IsVirtual() + && (i = cdp->GetOffset() - 1) < Fields) { if (!(pfm = cdp->GetFmt())) { sprintf(g->Message, MSG(NO_FLD_FORMAT), i + 1, Name); return true; @@ -1275,6 +1270,25 @@ CSVCOL::CSVCOL(CSVCOL *col1, PTDB tdbp) : DOSCOL(col1, tdbp) } // end of CSVCOL copy constructor /***********************************************************************/ +/* VarSize: This function tells UpdateDB whether or not the block */ +/* optimization file must be redone if this column is updated, even */ +/* it is not sorted or clustered. This applies to a blocked table, */ +/* because if it is updated using a temporary file, the block size */ +/* may be modified. 
*/ +/***********************************************************************/ +bool CSVCOL::VarSize(void) + { + PTXF txfp = ((PTDBCSV)To_Tdb)->Txfp; + + if (txfp->IsBlocked() && txfp->GetUseTemp()) + // Blocked table using a temporary file + return true; + else + return false; + + } // end VarSize + +/***********************************************************************/ /* ReadColumn: call DOSCOL::ReadColumn after having set the offet */ /* and length of the field to read as calculated by TDBCSV::ReadDB. */ /***********************************************************************/ @@ -1408,7 +1422,7 @@ TDBCCL::TDBCCL(PCSVDEF tdp) : TDBCAT(tdp) Hdr = tdp->Header; Mxr = tdp->Maxerr; Qtd = tdp->Quoted; - Sep = tdp->Sep; + Sep = tdp->Sep; } // end of TDBCCL constructor /***********************************************************************/ @@ -1417,6 +1431,6 @@ TDBCCL::TDBCCL(PCSVDEF tdp) : TDBCAT(tdp) PQRYRES TDBCCL::GetResult(PGLOBAL g) { return CSVColumns(g, Fn, Sep, Qtd, Hdr, Mxr, false); - } // end of GetResult + } // end of GetResult /* ------------------------ End of TabFmt ---------------------------- */ diff --git a/storage/connect/tabfmt.h b/storage/connect/tabfmt.h index 5efa824c2e2..aa14b4481f0 100644 --- a/storage/connect/tabfmt.h +++ b/storage/connect/tabfmt.h @@ -1,187 +1,188 @@ -/*************** TabFmt H Declares Source Code File (.H) ***************/ -/* Name: TABFMT.H Version 2.3 */ -/* */ -/* (C) Copyright to the author Olivier BERTRAND 2001-2013 */ -/* */ -/* This file contains the CSV and FMT classes declares. */ -/***********************************************************************/ -#include "xtable.h" // Base class declares -#include "tabdos.h" - -typedef class TDBFMT *PTDBFMT; - -/***********************************************************************/ -/* Functions used externally. 
*/ -/***********************************************************************/ -PQRYRES CSVColumns(PGLOBAL g, const char *fn, char sep, char q, - int hdr, int mxr, bool info); - -/***********************************************************************/ -/* CSV table. */ -/***********************************************************************/ -class DllExport CSVDEF : public DOSDEF { /* Logical table description */ - friend class TDBCSV; - friend class TDBCCL; - public: - // Constructor - CSVDEF(void); - - // Implementation - virtual const char *GetType(void) {return "CSV";} - char GetSep(void) {return Sep;} - char GetQot(void) {return Qot;} - - // Methods - virtual bool DefineAM(PGLOBAL g, LPCSTR am, int poff); - virtual PTDB GetTable(PGLOBAL g, MODE mode); - - protected: - // Members - bool Fmtd; /* true for formatted files */ -//bool Accept; /* true if wrong lines are accepted */ - bool Header; /* true if first line contains headers */ -//int Maxerr; /* Maximum number of bad records */ - int Quoted; /* Quoting level for quoted fields */ - char Sep; /* Separator for standard CSV files */ - char Qot; /* Character for quoted strings */ - }; // end of CSVDEF - -/***********************************************************************/ -/* This is the DOS/UNIX Access Method class declaration for files */ -/* that are CSV files with columns separated by the Sep character. 
*/ -/***********************************************************************/ -class TDBCSV : public TDBDOS { - friend class CSVCOL; - public: - // Constructor - TDBCSV(PCSVDEF tdp, PTXF txfp); - TDBCSV(PGLOBAL g, PTDBCSV tdbp); - - // Implementation - virtual AMT GetAmType(void) {return TYPE_AM_CSV;} - virtual PTDB Duplicate(PGLOBAL g) - {return (PTDB)new(g) TDBCSV(g, this);} - - // Methods - virtual PTDB CopyOne(PTABS t); -//virtual bool IsUsingTemp(PGLOBAL g); - virtual int GetBadLines(void) {return (int)Nerr;} - - // Database routines - virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); - virtual bool OpenDB(PGLOBAL g); - virtual int WriteDB(PGLOBAL g); - virtual int CheckWrite(PGLOBAL g); - virtual int ReadBuffer(PGLOBAL g); // Physical file read - - // Specific routines - virtual int EstimatedLength(PGLOBAL g); - virtual bool SkipHeader(PGLOBAL g); - virtual bool CheckErr(void); - - protected: - // Members - PSZ *Field; // Field to write to current line - int *Offset; // Column offsets for current record - int *Fldlen; // Column field length for current record - bool *Fldtyp; // true for numeric fields - int Fields; // Number of fields to handle - int Nerr; // Number of bad records - int Maxerr; // Maximum number of bad records - int Quoted; // Quoting level for quoted fields - bool Accept; // true if bad lines are accepted - bool Header; // true if first line contains column headers - char Sep; // Separator - char Qot; // Quoting character - }; // end of class TDBCSV - -/***********************************************************************/ -/* Class CSVCOL: CSV access method column descriptor. */ -/* This A.M. is used for Comma Separated V(?) files. 
*/ -/***********************************************************************/ -class CSVCOL : public DOSCOL { - friend class TDBCSV; - friend class TDBFMT; - public: - // Constructors - CSVCOL(PGLOBAL g, PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i); - CSVCOL(CSVCOL *colp, PTDB tdbp); // Constructor used in copy process - - // Implementation - virtual int GetAmType() {return TYPE_AM_CSV;} - - // Methods - virtual void ReadColumn(PGLOBAL g); - virtual void WriteColumn(PGLOBAL g); - - protected: - // Default constructor not to be used - CSVCOL(void) {} - - // Members - int Fldnum; // Field ordinal number (0 based) - }; // end of class CSVCOL - -/***********************************************************************/ -/* This is the DOS/UNIX Access Method class declaration for files */ -/* whose record format is described by a Format keyword. */ -/***********************************************************************/ -class TDBFMT : public TDBCSV { - friend class CSVCOL; -//friend class FMTCOL; - public: - // Standard constructor - TDBFMT(PCSVDEF tdp, PTXF txfp) : TDBCSV(tdp, txfp) - {FldFormat = NULL; To_Fld = NULL; FmtTest = NULL; Linenum = 0;} - - // Copy constructor - TDBFMT(PGLOBAL g, PTDBFMT tdbp); - - // Implementation - virtual AMT GetAmType(void) {return TYPE_AM_FMT;} - virtual PTDB Duplicate(PGLOBAL g) - {return (PTDB)new(g) TDBFMT(g, this);} - - // Methods - virtual PTDB CopyOne(PTABS t); - - // Database routines - virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); -//virtual int GetMaxSize(PGLOBAL g); - virtual bool OpenDB(PGLOBAL g); - virtual int WriteDB(PGLOBAL g); -//virtual int CheckWrite(PGLOBAL g); - virtual int ReadBuffer(PGLOBAL g); // Physical file read - - // Specific routines - virtual int EstimatedLength(PGLOBAL g); - - protected: - // Members - PSZ *FldFormat; // Field read format - void *To_Fld; // To field test buffer - int *FmtTest; // Test on ending by %n or %m - int Linenum; // Last read line - }; // end of class TDBFMT - 
-/***********************************************************************/ -/* This is the class declaration for the CSV catalog table. */ -/***********************************************************************/ -class TDBCCL : public TDBCAT { - public: - // Constructor - TDBCCL(PCSVDEF tdp); - - protected: - // Specific routines - virtual PQRYRES GetResult(PGLOBAL g); - - // Members - char *Fn; // The CSV file (path) name - bool Hdr; // true if first line contains headers - int Mxr; // Maximum number of bad records - int Qtd; // Quoting level for quoted fields - char Sep; // Separator for standard CSV files - }; // end of class TDBCCL - -/* ------------------------- End of TabFmt.H ------------------------- */ +/*************** TabFmt H Declares Source Code File (.H) ***************/
+/* Name: TABFMT.H Version 2.4 */
+/* */
+/* (C) Copyright to the author Olivier BERTRAND 2001-2014 */
+/* */
+/* This file contains the CSV and FMT classes declares. */
+/***********************************************************************/
+#include "xtable.h" // Base class declares
+#include "tabdos.h"
+
+typedef class TDBFMT *PTDBFMT;
+
+/***********************************************************************/
+/* Functions used externally. */
+/***********************************************************************/
+PQRYRES CSVColumns(PGLOBAL g, const char *fn, char sep, char q,
+ int hdr, int mxr, bool info);
+
+/***********************************************************************/
+/* CSV table. */
+/***********************************************************************/
+class DllExport CSVDEF : public DOSDEF { /* Logical table description */
+ friend class TDBCSV;
+ friend class TDBCCL;
+ public:
+ // Constructor
+ CSVDEF(void);
+
+ // Implementation
+ virtual const char *GetType(void) {return "CSV";}
+ char GetSep(void) {return Sep;}
+ char GetQot(void) {return Qot;}
+
+ // Methods
+ virtual bool DefineAM(PGLOBAL g, LPCSTR am, int poff);
+ virtual PTDB GetTable(PGLOBAL g, MODE mode);
+
+ protected:
+ // Members
+ bool Fmtd; /* true for formatted files */
+//bool Accept; /* true if wrong lines are accepted */
+ bool Header; /* true if first line contains headers */
+//int Maxerr; /* Maximum number of bad records */
+ int Quoted; /* Quoting level for quoted fields */
+ char Sep; /* Separator for standard CSV files */
+ char Qot; /* Character for quoted strings */
+ }; // end of CSVDEF
+
+/***********************************************************************/
+/* This is the DOS/UNIX Access Method class declaration for files */
+/* that are CSV files with columns separated by the Sep character. */
+/***********************************************************************/
+class TDBCSV : public TDBDOS {
+ friend class CSVCOL;
+ public:
+ // Constructor
+ TDBCSV(PCSVDEF tdp, PTXF txfp);
+ TDBCSV(PGLOBAL g, PTDBCSV tdbp);
+
+ // Implementation
+ virtual AMT GetAmType(void) {return TYPE_AM_CSV;}
+ virtual PTDB Duplicate(PGLOBAL g)
+ {return (PTDB)new(g) TDBCSV(g, this);}
+
+ // Methods
+ virtual PTDB CopyOne(PTABS t);
+//virtual bool IsUsingTemp(PGLOBAL g);
+ virtual int GetBadLines(void) {return (int)Nerr;}
+
+ // Database routines
+ virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
+ virtual bool OpenDB(PGLOBAL g);
+ virtual int WriteDB(PGLOBAL g);
+ virtual int CheckWrite(PGLOBAL g);
+ virtual int ReadBuffer(PGLOBAL g); // Physical file read
+
+ // Specific routines
+ virtual int EstimatedLength(PGLOBAL g);
+ virtual bool SkipHeader(PGLOBAL g);
+ virtual bool CheckErr(void);
+
+ protected:
+ // Members
+ PSZ *Field; // Field to write to current line
+ int *Offset; // Column offsets for current record
+ int *Fldlen; // Column field length for current record
+ bool *Fldtyp; // true for numeric fields
+ int Fields; // Number of fields to handle
+ int Nerr; // Number of bad records
+ int Maxerr; // Maximum number of bad records
+ int Quoted; // Quoting level for quoted fields
+ bool Accept; // true if bad lines are accepted
+ bool Header; // true if first line contains column headers
+ char Sep; // Separator
+ char Qot; // Quoting character
+ }; // end of class TDBCSV
+
+/***********************************************************************/
+/* Class CSVCOL: CSV access method column descriptor. */
+/* This A.M. is used for Comma Separated V(?) files. */
+/***********************************************************************/
+class CSVCOL : public DOSCOL {
+ friend class TDBCSV;
+ friend class TDBFMT;
+ public:
+ // Constructors
+ CSVCOL(PGLOBAL g, PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i);
+ CSVCOL(CSVCOL *colp, PTDB tdbp); // Constructor used in copy process
+
+ // Implementation
+ virtual int GetAmType() {return TYPE_AM_CSV;}
+
+ // Methods
+ virtual bool VarSize(void);
+ virtual void ReadColumn(PGLOBAL g);
+ virtual void WriteColumn(PGLOBAL g);
+
+ protected:
+ // Default constructor not to be used
+ CSVCOL(void) {}
+
+ // Members
+ int Fldnum; // Field ordinal number (0 based)
+ }; // end of class CSVCOL
+
+/***********************************************************************/
+/* This is the DOS/UNIX Access Method class declaration for files */
+/* whose record format is described by a Format keyword. */
+/***********************************************************************/
+class TDBFMT : public TDBCSV {
+ friend class CSVCOL;
+//friend class FMTCOL;
+ public:
+ // Standard constructor
+ TDBFMT(PCSVDEF tdp, PTXF txfp) : TDBCSV(tdp, txfp)
+ {FldFormat = NULL; To_Fld = NULL; FmtTest = NULL; Linenum = 0;}
+
+ // Copy constructor
+ TDBFMT(PGLOBAL g, PTDBFMT tdbp);
+
+ // Implementation
+ virtual AMT GetAmType(void) {return TYPE_AM_FMT;}
+ virtual PTDB Duplicate(PGLOBAL g)
+ {return (PTDB)new(g) TDBFMT(g, this);}
+
+ // Methods
+ virtual PTDB CopyOne(PTABS t);
+
+ // Database routines
+ virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
+//virtual int GetMaxSize(PGLOBAL g);
+ virtual bool OpenDB(PGLOBAL g);
+ virtual int WriteDB(PGLOBAL g);
+//virtual int CheckWrite(PGLOBAL g);
+ virtual int ReadBuffer(PGLOBAL g); // Physical file read
+
+ // Specific routines
+ virtual int EstimatedLength(PGLOBAL g);
+
+ protected:
+ // Members
+ PSZ *FldFormat; // Field read format
+ void *To_Fld; // To field test buffer
+ int *FmtTest; // Test on ending by %n or %m
+ int Linenum; // Last read line
+ }; // end of class TDBFMT
+
+/***********************************************************************/
+/* This is the class declaration for the CSV catalog table. */
+/***********************************************************************/
+class TDBCCL : public TDBCAT {
+ public:
+ // Constructor
+ TDBCCL(PCSVDEF tdp);
+
+ protected:
+ // Specific routines
+ virtual PQRYRES GetResult(PGLOBAL g);
+
+ // Members
+ char *Fn; // The CSV file (path) name
+ bool Hdr; // true if first line contains headers
+ int Mxr; // Maximum number of bad records
+ int Qtd; // Quoting level for quoted fields
+ char Sep; // Separator for standard CSV files
+ }; // end of class TDBCCL
+
+/* ------------------------- End of TabFmt.H ------------------------- */
diff --git a/storage/connect/table.cpp b/storage/connect/table.cpp index 325e80945f7..94c00ebb061 100644 --- a/storage/connect/table.cpp +++ b/storage/connect/table.cpp @@ -45,19 +45,22 @@ TDB::TDB(PTABDEF tdp) : Tdb_No(++Tnum) { Use = USE_NO; To_Orig = NULL; + To_Filter = NULL; To_CondFil = NULL; Next = NULL; Name = (tdp) ? tdp->GetName() : NULL; To_Table = NULL; Columns = NULL; Degree = (tdp) ? tdp->GetDegree() : 0; - Mode = MODE_READ; + Mode = MODE_ANY; + Cardinal = -1; } // end of TDB standard constructor TDB::TDB(PTDB tdbp) : Tdb_No(++Tnum) { Use = tdbp->Use; To_Orig = tdbp; + To_Filter = NULL; To_CondFil = NULL; Next = NULL; Name = tdbp->Name; @@ -65,6 +68,7 @@ TDB::TDB(PTDB tdbp) : Tdb_No(++Tnum) Columns = NULL; Degree = tdbp->Degree; Mode = tdbp->Mode; + Cardinal = tdbp->Cardinal; } // end of TDB copy constructor // Methods @@ -137,7 +141,9 @@ TDBASE::TDBASE(PTABDEF tdp) : TDB(tdp) To_Link = NULL; To_Key_Col = NULL; To_Kindex = NULL; + To_Xdp = NULL; To_SetCols = NULL; + Ftype = RECFM_NAF; MaxSize = -1; Knum = 0; Read_Only = (tdp) ? tdp->IsReadOnly() : false; @@ -147,8 +153,14 @@ TDBASE::TDBASE(PTABDEF tdp) : TDB(tdp) TDBASE::TDBASE(PTDBASE tdbp) : TDB(tdbp) { To_Def = tdbp->To_Def; + To_Link = tdbp->To_Link; + To_Key_Col = tdbp->To_Key_Col; + To_Kindex = tdbp->To_Kindex; + To_Xdp = tdbp->To_Xdp; To_SetCols = tdbp->To_SetCols; // ??? + Ftype = tdbp->Ftype; MaxSize = tdbp->MaxSize; + Knum = tdbp->Knum; Read_Only = tdbp->Read_Only; m_data_charset= tdbp->m_data_charset; } // end of TDBASE copy constructor @@ -167,7 +179,7 @@ PCATLG TDBASE::GetCat(void) CHARSET_INFO *TDBASE::data_charset(void) { // If no DATA_CHARSET is specified, we assume that character - // set of the remote data is the same with CHARACTER SET + // set of the remote data is the same with CHARACTER SET // definition of the SQL column. return m_data_charset ? 
m_data_charset : &my_charset_bin; } // end of data_charset @@ -219,12 +231,12 @@ PCOL TDBASE::ColDB(PGLOBAL g, PSZ name, int num) colp = cp; else if (!(cdp->Flags & U_SPECIAL)) colp = MakeCol(g, cdp, cprec, i); - else if (Mode == MODE_READ) + else if (Mode != MODE_INSERT) colp = InsertSpcBlk(g, cdp); if (trace) htrc("colp=%p\n", colp); - + if (name || num) break; else if (colp && !colp->IsSpecial()) @@ -259,22 +271,38 @@ PCOL TDBASE::InsertSpcBlk(PGLOBAL g, PCOLDEF cdp) PCOL colp; cp= new(g) COLUMN(cdp->GetName()); - cp->SetTo_Table(To_Table); - if (!stricmp(name, "FILEID") || - !stricmp(name, "SERVID")) { + if (! To_Table) { + strcpy(g->Message, "Cannot make special column: To_Table is NULL"); + return NULL; + } else + cp->SetTo_Table(To_Table); + + if (!stricmp(name, "FILEID") || !stricmp(name, "FDISK") || + !stricmp(name, "FPATH") || !stricmp(name, "FNAME") || + !stricmp(name, "FTYPE") || !stricmp(name, "SERVID")) { if (!To_Def || !(To_Def->GetPseudo() & 2)) { sprintf(g->Message, MSG(BAD_SPEC_COLUMN)); return NULL; } // endif Pseudo if (!stricmp(name, "FILEID")) - colp = new(g) FIDBLK(cp); + colp = new(g) FIDBLK(cp, OP_XX); + else if (!stricmp(name, "FDISK")) + colp = new(g) FIDBLK(cp, OP_FDISK); + else if (!stricmp(name, "FPATH")) + colp = new(g) FIDBLK(cp, OP_FPATH); + else if (!stricmp(name, "FNAME")) + colp = new(g) FIDBLK(cp, OP_FNAME); + else if (!stricmp(name, "FTYPE")) + colp = new(g) FIDBLK(cp, OP_FTYPE); else colp = new(g) SIDBLK(cp); } else if (!stricmp(name, "TABID")) { colp = new(g) TIDBLK(cp); + } else if (!stricmp(name, "PARTID")) { + colp = new(g) PRTBLK(cp); //} else if (!stricmp(name, "CONID")) { // colp = new(g) CIDBLK(cp); } else if (!stricmp(name, "ROWID")) { @@ -297,7 +325,7 @@ PCOL TDBASE::InsertSpcBlk(PGLOBAL g, PCOLDEF cdp) /***********************************************************************/ /* ResetTableOpt: Wrong for this table type. 
*/ /***********************************************************************/ -int TDBASE::ResetTableOpt(PGLOBAL g, bool dox) +int TDBASE::ResetTableOpt(PGLOBAL g, bool dop, bool dox) { strcpy(g->Message, "This table is not indexable"); return RC_INFO; @@ -324,7 +352,7 @@ void TDBASE::ResetKindex(PGLOBAL g, PKXBASE kxp) /***********************************************************************/ /* SetRecpos: Replace the table at the specified position. */ /***********************************************************************/ -bool TDBASE::SetRecpos(PGLOBAL g, int recpos) +bool TDBASE::SetRecpos(PGLOBAL g, int recpos) { strcpy(g->Message, MSG(SETRECPOS_NIY)); return true; @@ -389,8 +417,8 @@ PCOL TDBCAT::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) /***********************************************************************/ bool TDBCAT::Initialize(PGLOBAL g) { - if (Init) - return false; + if (Init) + return false; if (!(Qrp = GetResult(g))) return true; @@ -405,9 +433,9 @@ bool TDBCAT::Initialize(PGLOBAL g) PushWarning(g, this); } // endif Badlines - Init = true; - return false; - } // end of Initialize + Init = true; + return false; + } // end of Initialize /***********************************************************************/ /* CAT: Get the number of properties. */ @@ -487,7 +515,7 @@ bool TDBCAT::InitCol(PGLOBAL g) /***********************************************************************/ /* SetRecpos: Replace the table at the specified position. 
*/ /***********************************************************************/ -bool TDBCAT::SetRecpos(PGLOBAL g, int recpos) +bool TDBCAT::SetRecpos(PGLOBAL g, int recpos) { N = recpos - 1; return false; diff --git a/storage/connect/tabmac.h b/storage/connect/tabmac.h index eb115b18049..5e6c98d68fb 100644 --- a/storage/connect/tabmac.h +++ b/storage/connect/tabmac.h @@ -58,6 +58,7 @@ class TDBMAC : public TDBASE { // Database routines virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); + virtual int Cardinality(PGLOBAL g) {return GetMaxSize(g);} virtual int GetMaxSize(PGLOBAL g); virtual bool OpenDB(PGLOBAL g); virtual int ReadDB(PGLOBAL g); diff --git a/storage/connect/tabmysql.cpp b/storage/connect/tabmysql.cpp index 37c72501840..d84f6c9aab4 100644 --- a/storage/connect/tabmysql.cpp +++ b/storage/connect/tabmysql.cpp @@ -1,11 +1,11 @@ /************* TabMySQL C++ Program Source Code File (.CPP) *************/ /* PROGRAM NAME: TABMYSQL */ /* ------------- */ -/* Version 1.7 */ +/* Version 1.9 */ /* */ /* AUTHOR: */ /* ------- */ -/* Olivier BERTRAND 2007-2013 */ +/* Olivier BERTRAND 2007-2014 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -69,6 +69,10 @@ void PrintResult(PGLOBAL, PSEM, PQRYRES); extern "C" int trace; +// Used to check whether a MYSQL table is created on itself +bool CheckSelf(PGLOBAL g, TABLE_SHARE *s, const char *host, + const char *db, char *tab, const char *src, int port); + /* -------------- Implementation of the MYSQLDEF class --------------- */ /***********************************************************************/ @@ -109,7 +113,7 @@ bool MYSQLDEF::GetServerInfo(PGLOBAL g, const char *server_name) } // endif server_name // get_server_by_name() clones the server if exists and allocates - // copies of strings in the supplied mem_root + // copies of strings in the supplied mem_root if (!(server= get_server_by_name(mem, server_name, &server_buffer))) { DBUG_PRINT("info", ("get_server_by_name returned > 0 
error condition!")); /* need to come up with error handling */ @@ -181,7 +185,7 @@ bool MYSQLDEF::ParseURL(PGLOBAL g, char *url, bool b) // connection name of either "server" or "server/table" // ok, so we do a little parsing, but not completely! if ((Tabname= strchr(url, '/'))) { - // If there is a single '/' in the connection string, + // If there is a single '/' in the connection string, // this means the user is specifying a table name *Tabname++= '\0'; @@ -260,7 +264,7 @@ bool MYSQLDEF::ParseURL(PGLOBAL g, char *url, bool b) } // endif / } // endif Tabname - + } // endif database if ((sport = strchr(Hostname, ':'))) @@ -311,7 +315,7 @@ bool MYSQLDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) // Normal case of specific MYSQL table url = GetStringCatInfo(g, "Connect", NULL); - if (!url || !*url) { + if (!url || !*url) { // Not using the connection URL Hostname = GetStringCatInfo(g, "Host", "localhost"); Database = GetStringCatInfo(g, "Database", "*"); @@ -353,8 +357,12 @@ bool MYSQLDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) Tabname = Name; } // endif am - if ((Srcdef = GetStringCatInfo(g, "Srcdef", NULL))) + if ((Srcdef = GetStringCatInfo(g, "Srcdef", NULL))) { + Read_Only = true; Isview = true; + } else if (CheckSelf(g, Hc->GetTable()->s, Hostname, Database, + Tabname, Srcdef, Portnumber)) + return true; // Used for Update and Delete Qrystr = GetStringCatInfo(g, "Query_String", "?"); @@ -436,7 +444,7 @@ TDBMYSQL::TDBMYSQL(PGLOBAL g, PTDBMY tdbp) : TDBASE(tdbp) Tabname = tdbp->Tabname; Srcdef = tdbp->Srcdef; User = tdbp->User; - Pwd = tdbp->Pwd; + Pwd = tdbp->Pwd; Qrystr = tdbp->Qrystr; Quoted = tdbp->Quoted; Port = tdbp->Port; @@ -603,9 +611,7 @@ bool TDBMYSQL::MakeInsert(PGLOBAL g) else qlen += colp->GetLength(); - } // endif Prep - - if (Prep) + } else // Prep strcat(valist, "?"); } // endfor colp @@ -647,7 +653,7 @@ int TDBMYSQL::MakeCommand(PGLOBAL g) // Make a lower case copy of the originale query - qrystr = (char*)PlugSubAlloc(g, NULL, strlen(Qrystr) + 
1); + qrystr = (char*)PlugSubAlloc(g, NULL, strlen(Qrystr) + 5); strlwr(strcpy(qrystr, Qrystr)); // Check whether the table name is equal to a keyword @@ -667,6 +673,7 @@ int TDBMYSQL::MakeCommand(PGLOBAL g) strcat(Query, Tabname); strcat(Query, Qrystr + (p - qrystr) + strlen(name)); + strlwr(strcpy(qrystr, Query)); } else { sprintf(g->Message, "Cannot use this %s command", (Mode == MODE_UPDATE) ? "UPDATE" : "DELETE"); @@ -740,33 +747,50 @@ int TDBMYSQL::MakeDelete(PGLOBAL g) #endif // 0 /***********************************************************************/ -/* XCV GetMaxSize: returns the maximum number of rows in the table. */ +/* MYSQL Cardinality: returns the number of rows in the table. */ /***********************************************************************/ -int TDBMYSQL::GetMaxSize(PGLOBAL g) - { - if (MaxSize < 0) { -#if 0 - if (MakeSelect(g)) - return -2; +int TDBMYSQL::Cardinality(PGLOBAL g) +{ + if (!g) + return (Mode == MODE_ANY && !Srcdef) ? 1 : 0; - if (!Myc.Connected()) { - if (Myc.Open(g, Host, Database, User, Pwd, Port)) - return -1; + if (Cardinal < 0 && Mode == MODE_ANY && !Srcdef) { + // Info command, we must return the exact table row number + char query[96]; + MYSQLC myc; - } // endif connected + if (myc.Open(g, Host, Database, User, Pwd, Port)) + return -1; - if ((MaxSize = Myc.GetResultSize(g, Query)) < 0) { - Myc.Close(); - return -3; - } // endif MaxSize + strcpy(query, "SELECT COUNT(*) FROM "); - // FIXME: Columns should be known when Info calls GetMaxSize - if (!Columns) - Query = NULL; // Must be remade when columns are known -#endif // 0 + if (Quoted > 0) + strcat(strcat(strcat(query, "`"), Tabname), "`"); + else + strcat(query, Tabname); + + Cardinal = myc.GetTableSize(g, query); + myc.Close(); + } else + Cardinal = 10; // To make MySQL happy + + return Cardinal; +} // end of Cardinality + +/***********************************************************************/ +/* MYSQL GetMaxSize: returns the maximum number of rows in the 
table. */ +/***********************************************************************/ +int TDBMYSQL::GetMaxSize(PGLOBAL g) + { + if (MaxSize < 0) { + if (Mode == MODE_DELETE) + // Return 0 in mode DELETE in case of delete all. + MaxSize = 0; + else if (!Cardinality(NULL)) + MaxSize = 10; // To make MySQL happy + else if ((MaxSize = Cardinality(g)) < 0) + MaxSize = 12; // So we can see an error occured - // Return 0 in mode DELETE in case of delete all. - MaxSize = (Mode == MODE_DELETE) ? 0 : 10; // To make MySQL happy } // endif MaxSize return MaxSize; @@ -881,11 +905,12 @@ bool TDBMYSQL::OpenDB(PGLOBAL g) } // endif MakeInsert if (m_Rc != RC_FX) { + int rc; char cmd[64]; int w; sprintf(cmd, "ALTER TABLE `%s` DISABLE KEYS", Tabname); - m_Rc = Myc.ExecSQL(g, cmd, &w); + rc = Myc.ExecSQL(g, cmd, &w); // may fail for some engines } // endif m_Rc } else @@ -1012,7 +1037,8 @@ bool TDBMYSQL::ReadKey(PGLOBAL g, OPVAL op, const void *key, int len) { int oldlen = strlen(Query); - if (op == OP_NEXT) + if (!key || op == OP_NEXT || + Mode == MODE_UPDATE || Mode == MODE_DELETE) return false; else if (op == OP_FIRST) { if (To_CondFil) @@ -1031,7 +1057,7 @@ bool TDBMYSQL::ReadKey(PGLOBAL g, OPVAL op, const void *key, int len) m_Rc = Myc.ExecSQL(g, Query); Query[oldlen] = 0; - return false; + return (m_Rc == RC_FX) ? 
true : false; } // end of ReadKey /***********************************************************************/ @@ -1081,13 +1107,13 @@ int TDBMYSQL::WriteDB(PGLOBAL g) // Make the Insert command value list for (PCOL colp = Columns; colp; colp = colp->GetNext()) { if (!colp->GetValue()->IsNull()) { - if (colp->GetResultType() == TYPE_STRING || + if (colp->GetResultType() == TYPE_STRING || colp->GetResultType() == TYPE_DATE) strcat(Qbuf, "'"); strcat(Qbuf, colp->GetValue()->GetCharString(buf)); - if (colp->GetResultType() == TYPE_STRING || + if (colp->GetResultType() == TYPE_STRING || colp->GetResultType() == TYPE_DATE) strcat(Qbuf, "'"); @@ -1109,7 +1135,7 @@ int TDBMYSQL::DeleteDB(PGLOBAL g, int irc) { if (irc == RC_FX) // Send the DELETE (all) command to the remote table - return (SendCommand(g) == RC_FX) ? RC_FX : RC_OK; + return (SendCommand(g) == RC_FX) ? RC_FX : RC_OK; else return RC_OK; // Ignore @@ -1129,7 +1155,7 @@ void TDBMYSQL::CloseDB(PGLOBAL g) dup->Step = "Enabling indexes"; sprintf(cmd, "ALTER TABLE `%s` ENABLE KEYS", Tabname); Myc.m_Rows = -1; // To execute the query - m_Rc = Myc.ExecSQL(g, cmd, &w); + m_Rc = Myc.ExecSQL(g, cmd, &w); // May fail for some engines } // endif m_Rc Myc.Close(); @@ -1178,6 +1204,7 @@ MYSQLCOL::MYSQLCOL(MYSQL_FIELD *fld, PTDB tdbp, int i, PSZ am) char v = (!strcmp(chset, "binary")) ? 'B' : 0; Name = fld->name; + Opt = 0; Precision = Long = fld->length; Buf_Type = MYSQLtoPLG(fld->type, &v); strcpy(Format.Type, GetFormatType(Buf_Type)); @@ -1376,7 +1403,7 @@ void MYSQLCOL::WriteColumn(PGLOBAL g) /***********************************************************************/ /* Implementation of the TDBMYEXC class. 
*/ /***********************************************************************/ -TDBMYEXC::TDBMYEXC(PMYDEF tdp) : TDBMYSQL(tdp) +TDBMYEXC::TDBMYEXC(PMYDEF tdp) : TDBMYSQL(tdp) { Cmdlist = NULL; Cmdcol = NULL; @@ -1528,7 +1555,7 @@ int TDBMYEXC::ReadDB(PGLOBAL g) if (Cmdlist) { // Process query to send int rc; - + do { Query = Cmdlist->Cmd; @@ -1548,7 +1575,7 @@ int TDBMYEXC::ReadDB(PGLOBAL g) case RC_INFO: Shw = true; } // endswitch rc - + Cmdlist = (Nerr > Mxr) ? NULL : Cmdlist->Next; } while (rc == RC_INFO); @@ -1645,11 +1672,11 @@ void MYXCOL::WriteColumn(PGLOBAL g) /***********************************************************************/ TDBMCL::TDBMCL(PMYDEF tdp) : TDBCAT(tdp) { - Host = tdp->Hostname; - Db = tdp->Database; - Tab = tdp->Tabname; - User = tdp->Username; - Pwd = tdp->Password; + Host = tdp->Hostname; + Db = tdp->Database; + Tab = tdp->Tabname; + User = tdp->Username; + Pwd = tdp->Password; Port = tdp->Portnumber; } // end of TDBMCL constructor @@ -1659,4 +1686,4 @@ TDBMCL::TDBMCL(PMYDEF tdp) : TDBCAT(tdp) PQRYRES TDBMCL::GetResult(PGLOBAL g) { return MyColumns(g, NULL, Host, Db, User, Pwd, Tab, NULL, Port, false); - } // end of GetResult + } // end of GetResult diff --git a/storage/connect/tabmysql.h b/storage/connect/tabmysql.h index 96991fb14c1..68cf453a9e6 100644 --- a/storage/connect/tabmysql.h +++ b/storage/connect/tabmysql.h @@ -1,4 +1,4 @@ -// TDBMYSQL.H Olivier Bertrand 2007-2013 +// TDBMYSQL.H Olivier Bertrand 2007-2014 #include "myconn.h" // MySQL connection declares typedef class MYSQLDEF *PMYDEF; @@ -81,7 +81,7 @@ class TDBMYSQL : public TDBASE { // Methods virtual PTDB CopyOne(PTABS t); - virtual int GetAffectedRows(void) {return AftRows;} +//virtual int GetAffectedRows(void) {return AftRows;} virtual int GetRecpos(void) {return N;} virtual int GetProgMax(PGLOBAL g); virtual void ResetDB(void) {N = 0;} @@ -92,6 +92,7 @@ class TDBMYSQL : public TDBASE { // Database routines virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int 
n); + virtual int Cardinality(PGLOBAL g); virtual int GetMaxSize(PGLOBAL g); virtual bool OpenDB(PGLOBAL g); virtual int ReadDB(PGLOBAL g); diff --git a/storage/connect/tabodbc.cpp b/storage/connect/tabodbc.cpp index 65226c9e36f..5542e832a54 100644 --- a/storage/connect/tabodbc.cpp +++ b/storage/connect/tabodbc.cpp @@ -100,7 +100,13 @@ ODBCDEF::ODBCDEF(void) /***********************************************************************/ bool ODBCDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) { - Desc = Connect = GetStringCatInfo(g, "Connect", ""); + Desc = Connect = GetStringCatInfo(g, "Connect", NULL); + + if (!Connect && !Catfunc) { + sprintf(g->Message, "Missing connection for ODBC table %s", Name); + return true; + } // endif Connect + Tabname = GetStringCatInfo(g, "Name", (Catfunc & (FNC_TABLE | FNC_COL)) ? NULL : Name); Tabname = GetStringCatInfo(g, "Tabname", Tabname); @@ -108,7 +114,10 @@ bool ODBCDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) Tabschema = GetStringCatInfo(g, "Schema", Tabschema); Tabcat = GetStringCatInfo(g, "Qualifier", NULL); Tabcat = GetStringCatInfo(g, "Catalog", Tabcat); - Srcdef = GetStringCatInfo(g, "Srcdef", NULL); + + if ((Srcdef = GetStringCatInfo(g, "Srcdef", NULL))) + Read_Only = true; + Qrystr = GetStringCatInfo(g, "Query_String", "?"); Sep = GetStringCatInfo(g, "Separator", NULL); Catver = GetIntCatInfo("Catver", 2); @@ -655,40 +664,58 @@ void TDBODBC::ResetSize(void) } // end of ResetSize /***********************************************************************/ +/* ODBC Cardinality: returns table size in number of rows. */ +/***********************************************************************/ +int TDBODBC::Cardinality(PGLOBAL g) + { + if (!g) + return (Mode == MODE_ANY && !Srcdef) ? 
1 : 0; + + if (Cardinal < 0 && Mode == MODE_ANY && !Srcdef) { + // Info command, we must return the exact table row number + char qry[96], tbn[64]; + ODBConn *ocp = new(g) ODBConn(g, this); + + if (ocp->Open(Connect, Options) < 1) + return -1; + + // Table name can be encoded in UTF-8 + Decode(TableName, tbn, sizeof(tbn)); + strcpy(qry, "SELECT COUNT(*) FROM "); + + if (Quote) + strcat(strcat(strcat(qry, Quote), tbn), Quote); + else + strcat(qry, tbn); + + // Allocate a Count(*) column (must not use the default constructor) + Cnp = new(g) ODBCCOL; + Cnp->InitValue(g); + + if ((Cardinal = ocp->GetResultSize(qry, Cnp)) < 0) + return -3; + + ocp->Close(); + } else + Cardinal = 10; // To make MySQL happy + + return Cardinal; + } // end of Cardinality + +/***********************************************************************/ /* ODBC GetMaxSize: returns table size estimate in number of lines. */ /***********************************************************************/ int TDBODBC::GetMaxSize(PGLOBAL g) { if (MaxSize < 0) { - // Make MariaDB happy - MaxSize = (Mode == MODE_DELETE) ? 0 : 10; -#if 0 - // This is unuseful and takes time - if (Srcdef) { - // Return a reasonable guess - MaxSize = 100; - return MaxSize; - } // endif Srcdef - - if (!Ocp) - Ocp = new(g) ODBConn(g, this); - - if (!Ocp->IsOpen()) - if (Ocp->Open(Connect, Options) < 1) - return -1; - - if (!Count && !(Count = MakeSQL(g, true))) - return -2; - - if (!Cnp) { - // Allocate a Count(*) column (must not use the default constructor) - Cnp = new(g) ODBCCOL; - Cnp->InitValue(g); - } // endif Cnp - - if ((MaxSize = Ocp->GetResultSize(Count, Cnp)) < 0) - return -3; -#endif // 0 + if (Mode == MODE_DELETE) + // Return 0 in mode DELETE in case of delete all. 
+ MaxSize = 0; + else if (!Cardinality(NULL)) + MaxSize = 10; // To make MySQL happy + else if ((MaxSize = Cardinality(g)) < 0) + MaxSize = 12; // So we can see an error occured + } // endif MaxSize return MaxSize; diff --git a/storage/connect/tabodbc.h b/storage/connect/tabodbc.h index 5db8cbb8cff..360f52c9d21 100644 --- a/storage/connect/tabodbc.h +++ b/storage/connect/tabodbc.h @@ -88,14 +88,15 @@ class TDBODBC : public TDBASE { virtual PSZ GetFile(PGLOBAL g); virtual void SetFile(PGLOBAL g, PSZ fn); virtual void ResetSize(void); - virtual int GetAffectedRows(void) {return AftRows;} +//virtual int GetAffectedRows(void) {return AftRows;} virtual PSZ GetServer(void) {return "ODBC";} virtual int Indexable(void) {return 2;} // Database routines virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); - virtual int GetProgMax(PGLOBAL g); + virtual int Cardinality(PGLOBAL g); virtual int GetMaxSize(PGLOBAL g); + virtual int GetProgMax(PGLOBAL g); virtual bool OpenDB(PGLOBAL g); virtual int ReadDB(PGLOBAL g); virtual int WriteDB(PGLOBAL g); diff --git a/storage/connect/tabpivot.h b/storage/connect/tabpivot.h index c7248ee2e1d..25d139e895f 100644 --- a/storage/connect/tabpivot.h +++ b/storage/connect/tabpivot.h @@ -105,6 +105,7 @@ class TDBPIVOT : public TDBPRX { // Database routines virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); + virtual int Cardinality(PGLOBAL g) {return (g) ? 
10 : 0;} virtual int GetMaxSize(PGLOBAL g); virtual bool OpenDB(PGLOBAL g); virtual int ReadDB(PGLOBAL g); diff --git a/storage/connect/tabsys.cpp b/storage/connect/tabsys.cpp index e8ea7f4e43a..ae92c0771b6 100644 --- a/storage/connect/tabsys.cpp +++ b/storage/connect/tabsys.cpp @@ -1,9 +1,9 @@ /************* TabSys C++ Program Source Code File (.CPP) **************/ /* PROGRAM NAME: TABSYS */ /* ------------- */ -/* Version 2.2 */ +/* Version 2.3 */ /* */ -/* Author Olivier BERTRAND 2004-2013 */ +/* Author Olivier BERTRAND 2004-2014 */ /* */ /* This program are the INI/CFG tables classes. */ /***********************************************************************/ @@ -203,18 +203,35 @@ PCOL TDBINI::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) } // end of MakeCol /***********************************************************************/ -/* INI GetMaxSize: returns the number of sections in the INI file. */ +/* INI Cardinality: returns the number of sections in the INI file. */ /***********************************************************************/ -int TDBINI::GetMaxSize(PGLOBAL g) +int TDBINI::Cardinality(PGLOBAL g) { - if (MaxSize < 0 && GetSeclist(g)) { + if (!g) + return 1; + + if (Cardinal < 0) { // Count the number of sections from the section list - char *p; + char *p = GetSeclist(g); + + Cardinal = 0; + + if (p) + for (; *p; p += (strlen(p) + 1)) + Cardinal++; - for (MaxSize = 0, p = Seclist; *p; p += (strlen(p) + 1)) - MaxSize++; + } // endif Cardinal - } // endif MaxSize + return Cardinal; + } // end of Cardinality + +/***********************************************************************/ +/* INI GetMaxSize: returns the table cardinality. 
*/ +/***********************************************************************/ +int TDBINI::GetMaxSize(PGLOBAL g) + { + if (MaxSize < 0) + MaxSize = Cardinality(g); return MaxSize; } // end of GetMaxSize @@ -609,22 +626,28 @@ PCOL TDBXIN::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) } // end of MakeCol /***********************************************************************/ -/* XIN GetMaxSize: returns the number of sections in the XIN file. */ +/* XIN Cardinality: returns the number of keys in the XIN file. */ /***********************************************************************/ -int TDBXIN::GetMaxSize(PGLOBAL g) +int TDBXIN::Cardinality(PGLOBAL g) { - if (MaxSize < 0 && GetSeclist(g)) { + if (!g) + return 1; + + if (Cardinal < 0) { // Count the number of keys from the section list - char *p, *k; + char *k, *p = GetSeclist(g); - for (MaxSize = 0, p = Seclist; *p; p += (strlen(p) + 1)) - for (k = GetKeylist(g, p); *k; k += (strlen(k) + 1)) - MaxSize++; + Cardinal = 0; - } // endif MaxSize + if (p) + for (; *p; p += (strlen(p) + 1)) + for (k = GetKeylist(g, p); *k; k += (strlen(k) + 1)) + Cardinal++; - return MaxSize; - } // end of GetMaxSize + } // endif Cardinal + + return Cardinal; + } // end of Cardinality /***********************************************************************/ /* Record position is Section+Key. 
*/ @@ -633,7 +656,7 @@ int TDBXIN::GetRecpos(void) { union { short X[2]; // Section and Key offsets - int Xpos; // File position + int Xpos; // File position }; // end of union X[0] = (short)(Section - Seclist); @@ -648,7 +671,7 @@ bool TDBXIN::SetRecpos(PGLOBAL g, int recpos) { union { short X[2]; // Section and Key offsets - int Xpos; // File position + int Xpos; // File position }; // end of union Xpos = recpos; diff --git a/storage/connect/tabsys.h b/storage/connect/tabsys.h index 38b71d62ac4..aa45c260bc2 100644 --- a/storage/connect/tabsys.h +++ b/storage/connect/tabsys.h @@ -1,181 +1,182 @@ -/*************** TabSys H Declares Source Code File (.H) ***************/ -/* Name: TABSYS.H Version 2.2 */ -/* */ -/* (C) Copyright to the author Olivier BERTRAND 2004-2013 */ -/* */ -/* This file contains the XDB system tables classes declares. */ -/***********************************************************************/ -typedef class INIDEF *PINIDEF; -typedef class TDBINI *PTDBINI; -typedef class INICOL *PINICOL; -typedef class TDBXIN *PTDBXIN; -typedef class XINCOL *PXINCOL; - -/* --------------------------- INI classes --------------------------- */ - -/***********************************************************************/ -/* INI, XDB and XCL tables. 
*/ -/***********************************************************************/ -class DllExport INIDEF : public TABDEF { /* INI table description */ - friend class TDBINI; - friend class TDBXIN; - friend class TDBXTB; - friend class TDBRTB; - friend class TDBXCL; - public: - // Constructor - INIDEF(void); - - // Implementation - virtual const char *GetType(void) {return "INI";} - - // Methods - virtual bool DefineAM(PGLOBAL g, LPCSTR am, int poff); - virtual PTDB GetTable(PGLOBAL g, MODE m); - - protected: - // Members - char *Fn; /* Path/Name of corresponding file */ - char *Xname; /* The eventual table name */ - char Layout; /* R: Row, C: Column */ - int Ln; /* Length of section list buffer */ - }; // end of INIDEF - -/***********************************************************************/ -/* This is the class declaration for the INI tables. */ -/* These are tables represented by a INI like file. */ -/***********************************************************************/ -class TDBINI : public TDBASE { - friend class INICOL; - public: - // Constructor - TDBINI(PINIDEF tdp); - TDBINI(PTDBINI tdbp); - - // Implementation - virtual AMT GetAmType(void) {return TYPE_AM_INI;} - virtual PTDB Duplicate(PGLOBAL g) {return (PTDB)new(g) TDBINI(this);} - - // Methods - virtual PTDB CopyOne(PTABS t); - virtual int GetRecpos(void) {return N;} - virtual int GetProgCur(void) {return N;} - virtual int GetAffectedRows(void) {return 0;} - virtual PSZ GetFile(PGLOBAL g) {return Ifile;} - virtual void SetFile(PGLOBAL g, PSZ fn) {Ifile = fn;} - virtual void ResetDB(void) {Seclist = Section = NULL; N = 0;} - virtual void ResetSize(void) {MaxSize = -1; Seclist = NULL;} - virtual int RowNumber(PGLOBAL g, bool b = false) {return N;} - char *GetSeclist(PGLOBAL g); - - // Database routines - virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); - virtual int GetMaxSize(PGLOBAL g); - virtual bool OpenDB(PGLOBAL g); - virtual int ReadDB(PGLOBAL g); - virtual int WriteDB(PGLOBAL 
g); - virtual int DeleteDB(PGLOBAL g, int irc); - virtual void CloseDB(PGLOBAL g); - - protected: - // Members - char *Ifile; // The INI file - char *Seclist; // The section list - char *Section; // The current section - int Seclen; // Length of seclist buffer - int N; // The current section index - }; // end of class TDBINI - -/***********************************************************************/ -/* Class INICOL: XDB table access method column descriptor. */ -/***********************************************************************/ -class INICOL : public COLBLK { - public: - // Constructors - INICOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "INI"); - INICOL(INICOL *colp, PTDB tdbp); // Constructor used in copy process - - // Implementation - virtual int GetAmType(void) {return TYPE_AM_INI;} - virtual void SetTo_Val(PVAL valp) {To_Val = valp;} - - // Methods - virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check); - virtual void ReadColumn(PGLOBAL g); - virtual void WriteColumn(PGLOBAL g); - virtual void AllocBuf(PGLOBAL g); - - protected: - // Default constructor not to be used - INICOL(void) {} - - // Members - char *Valbuf; // To the key value buffer - int Flag; // Tells what set in value - int Long; // Buffer length - PVAL To_Val; // To value used for Update/Insert - }; // end of class INICOL - -/* --------------------------- XINI class ---------------------------- */ - -/***********************************************************************/ -/* This is the class declaration for the XINI tables. */ -/* These are tables represented by a INI like file */ -/* having 3 columns Section, Key, and Value. 
*/ -/***********************************************************************/ -class TDBXIN : public TDBINI { - friend class XINCOL; - public: - // Constructor - TDBXIN(PINIDEF tdp); - TDBXIN(PTDBXIN tdbp); - - // Implementation - virtual AMT GetAmType(void) {return TYPE_AM_INI;} - virtual PTDB Duplicate(PGLOBAL g) {return (PTDB)new(g) TDBXIN(this);} - - // Methods - virtual PTDB CopyOne(PTABS t); - virtual int GetRecpos(void); - virtual bool SetRecpos(PGLOBAL g, int recpos); - virtual void ResetDB(void) - {Seclist = Section = Keycur = NULL; N = 0; Oldsec = -1;} - char *GetKeylist(PGLOBAL g, char *sec); - - // Database routines - virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); - virtual int GetMaxSize(PGLOBAL g); - virtual bool OpenDB(PGLOBAL g); - virtual int ReadDB(PGLOBAL g); - virtual int WriteDB(PGLOBAL g); - virtual int DeleteDB(PGLOBAL g, int irc); - - protected: - // Members - char *Keylist; // The key list - char *Keycur; // The current key - int Keylen; // Length of keylist buffer - short Oldsec; // Last current section - }; // end of class TDBXIN - -/***********************************************************************/ -/* Class XINCOL: XIN table access method column descriptor. */ -/***********************************************************************/ -class XINCOL : public INICOL { - public: - // Constructors - XINCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "INI"); - XINCOL(XINCOL *colp, PTDB tdbp); // Constructor used in copy process - - // Implementation - - // Methods - virtual void ReadColumn(PGLOBAL g); - virtual void WriteColumn(PGLOBAL g); - - protected: - // Default constructor not to be used - XINCOL(void) {} - - // Members - }; // end of class XINICOL +/*************** TabSys H Declares Source Code File (.H) ***************/
+/* Name: TABSYS.H Version 2.3 */
+/* */
+/* (C) Copyright to the author Olivier BERTRAND 2004-2014 */
+/* */
+/* This file contains the XDB system tables classes declares. */
+/***********************************************************************/
+typedef class INIDEF *PINIDEF;
+typedef class TDBINI *PTDBINI;
+typedef class INICOL *PINICOL;
+typedef class TDBXIN *PTDBXIN;
+typedef class XINCOL *PXINCOL;
+
+/* --------------------------- INI classes --------------------------- */
+
+/***********************************************************************/
+/* INI, XDB and XCL tables. */
+/***********************************************************************/
+class DllExport INIDEF : public TABDEF { /* INI table description */
+ friend class TDBINI;
+ friend class TDBXIN;
+ friend class TDBXTB;
+ friend class TDBRTB;
+ friend class TDBXCL;
+ public:
+ // Constructor
+ INIDEF(void);
+
+ // Implementation
+ virtual const char *GetType(void) {return "INI";}
+
+ // Methods
+ virtual bool DefineAM(PGLOBAL g, LPCSTR am, int poff);
+ virtual PTDB GetTable(PGLOBAL g, MODE m);
+
+ protected:
+ // Members
+ char *Fn; /* Path/Name of corresponding file */
+ char *Xname; /* The eventual table name */
+ char Layout; /* R: Row, C: Column */
+ int Ln; /* Length of section list buffer */
+ }; // end of INIDEF
+
+/***********************************************************************/
+/* This is the class declaration for the INI tables. */
+/* These are tables represented by a INI like file. */
+/***********************************************************************/
+class TDBINI : public TDBASE {
+ friend class INICOL;
+ public:
+ // Constructor
+ TDBINI(PINIDEF tdp);
+ TDBINI(PTDBINI tdbp);
+
+ // Implementation
+ virtual AMT GetAmType(void) {return TYPE_AM_INI;}
+ virtual PTDB Duplicate(PGLOBAL g) {return (PTDB)new(g) TDBINI(this);}
+
+ // Methods
+ virtual PTDB CopyOne(PTABS t);
+ virtual int GetRecpos(void) {return N;}
+ virtual int GetProgCur(void) {return N;}
+//virtual int GetAffectedRows(void) {return 0;}
+ virtual PSZ GetFile(PGLOBAL g) {return Ifile;}
+ virtual void SetFile(PGLOBAL g, PSZ fn) {Ifile = fn;}
+ virtual void ResetDB(void) {Seclist = Section = NULL; N = 0;}
+ virtual void ResetSize(void) {MaxSize = -1; Seclist = NULL;}
+ virtual int RowNumber(PGLOBAL g, bool b = false) {return N;}
+ char *GetSeclist(PGLOBAL g);
+
+ // Database routines
+ virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
+ virtual int Cardinality(PGLOBAL g);
+ virtual int GetMaxSize(PGLOBAL g);
+ virtual bool OpenDB(PGLOBAL g);
+ virtual int ReadDB(PGLOBAL g);
+ virtual int WriteDB(PGLOBAL g);
+ virtual int DeleteDB(PGLOBAL g, int irc);
+ virtual void CloseDB(PGLOBAL g);
+
+ protected:
+ // Members
+ char *Ifile; // The INI file
+ char *Seclist; // The section list
+ char *Section; // The current section
+ int Seclen; // Length of seclist buffer
+ int N; // The current section index
+ }; // end of class TDBINI
+
+/***********************************************************************/
+/* Class INICOL: XDB table access method column descriptor. */
+/***********************************************************************/
+class INICOL : public COLBLK {
+ public:
+ // Constructors
+ INICOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "INI");
+ INICOL(INICOL *colp, PTDB tdbp); // Constructor used in copy process
+
+ // Implementation
+ virtual int GetAmType(void) {return TYPE_AM_INI;}
+ virtual void SetTo_Val(PVAL valp) {To_Val = valp;}
+
+ // Methods
+ virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check);
+ virtual void ReadColumn(PGLOBAL g);
+ virtual void WriteColumn(PGLOBAL g);
+ virtual void AllocBuf(PGLOBAL g);
+
+ protected:
+ // Default constructor not to be used
+ INICOL(void) {}
+
+ // Members
+ char *Valbuf; // To the key value buffer
+ int Flag; // Tells what set in value
+ int Long; // Buffer length
+ PVAL To_Val; // To value used for Update/Insert
+ }; // end of class INICOL
+
+/* --------------------------- XINI class ---------------------------- */
+
+/***********************************************************************/
+/* This is the class declaration for the XINI tables. */
+/* These are tables represented by a INI like file */
+/* having 3 columns Section, Key, and Value. */
+/***********************************************************************/
+class TDBXIN : public TDBINI {
+ friend class XINCOL;
+ public:
+ // Constructor
+ TDBXIN(PINIDEF tdp);
+ TDBXIN(PTDBXIN tdbp);
+
+ // Implementation
+ virtual AMT GetAmType(void) {return TYPE_AM_INI;}
+ virtual PTDB Duplicate(PGLOBAL g) {return (PTDB)new(g) TDBXIN(this);}
+
+ // Methods
+ virtual PTDB CopyOne(PTABS t);
+ virtual int GetRecpos(void);
+ virtual bool SetRecpos(PGLOBAL g, int recpos);
+ virtual void ResetDB(void)
+ {Seclist = Section = Keycur = NULL; N = 0; Oldsec = -1;}
+ char *GetKeylist(PGLOBAL g, char *sec);
+
+ // Database routines
+ virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
+ virtual int Cardinality(PGLOBAL g);
+ virtual bool OpenDB(PGLOBAL g);
+ virtual int ReadDB(PGLOBAL g);
+ virtual int WriteDB(PGLOBAL g);
+ virtual int DeleteDB(PGLOBAL g, int irc);
+
+ protected:
+ // Members
+ char *Keylist; // The key list
+ char *Keycur; // The current key
+ int Keylen; // Length of keylist buffer
+ short Oldsec; // Last current section
+ }; // end of class TDBXIN
+
+/***********************************************************************/
+/* Class XINCOL: XIN table access method column descriptor. */
+/***********************************************************************/
+class XINCOL : public INICOL {
+ public:
+ // Constructors
+ XINCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "INI");
+ XINCOL(XINCOL *colp, PTDB tdbp); // Constructor used in copy process
+
+ // Implementation
+
+ // Methods
+ virtual void ReadColumn(PGLOBAL g);
+ virtual void WriteColumn(PGLOBAL g);
+
+ protected:
+ // Default constructor not to be used
+ XINCOL(void) {}
+
+ // Members
+ }; // end of class XINICOL
diff --git a/storage/connect/tabtbl.cpp b/storage/connect/tabtbl.cpp index 0aeeb0b9d8d..f5a516ad1d0 100644 --- a/storage/connect/tabtbl.cpp +++ b/storage/connect/tabtbl.cpp @@ -350,7 +350,34 @@ bool TDBTBL::TestFil(PGLOBAL g, PCFIL filp, PTABLE tabp) } // end of TestFil /***********************************************************************/ -/* Sum up the sizes of all sub-tables. */ +/* Sum up the cardinality of all sub-tables. */ +/***********************************************************************/ +int TDBTBL::Cardinality(PGLOBAL g) + { + if (Cardinal < 0) { + int tsz; + + if (!Tablist && InitTableList(g)) + return 0; // Cannot be calculated at this stage + + Cardinal = 0; + + for (PTABLE tabp = Tablist; tabp; tabp = tabp->GetNext()) { + if ((tsz = tabp->GetTo_Tdb()->Cardinality(g)) < 0) { + Cardinal = -1; + return tsz; + } // endif mxsz + + Cardinal += tsz; + } // endfor i + + } // endif Cardinal + + return Cardinal; + } // end of Cardinality + +/***********************************************************************/ +/* Sum up the maximum sizes of all sub-tables. 
*/ /***********************************************************************/ int TDBTBL::GetMaxSize(PGLOBAL g) { @@ -435,7 +462,7 @@ bool TDBTBL::OpenDB(PGLOBAL g) if ((CurTable = Tablist)) { Tdbp = (PTDBASE)CurTable->GetTo_Tdb(); - Tdbp->SetMode(Mode); +// Tdbp->SetMode(Mode); // Tdbp->ResetDB(); // Tdbp->ResetSize(); @@ -685,7 +712,7 @@ bool TDBTBM::OpenDB(PGLOBAL g) /*********************************************************************/ if ((CurTable = Tablist)) { Tdbp = (PTDBASE)CurTable->GetTo_Tdb(); - Tdbp->SetMode(Mode); +// Tdbp->SetMode(Mode); // Check and initialize the subtable columns for (PCOL cp = Columns; cp; cp = cp->GetNext()) diff --git a/storage/connect/tabtbl.h b/storage/connect/tabtbl.h index fc35179f2ea..8bf440985ea 100644 --- a/storage/connect/tabtbl.h +++ b/storage/connect/tabtbl.h @@ -78,6 +78,7 @@ class DllExport TDBTBL : public TDBPRX { // Database routines virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); + virtual int Cardinality(PGLOBAL g); virtual int GetMaxSize(PGLOBAL g); virtual int RowNumber(PGLOBAL g, bool b = FALSE); virtual PCOL InsertSpecialColumn(PGLOBAL g, PCOL scp); diff --git a/storage/connect/tabutil.cpp b/storage/connect/tabutil.cpp index 4b9046e08d1..f4a8f2ee470 100644 --- a/storage/connect/tabutil.cpp +++ b/storage/connect/tabutil.cpp @@ -313,7 +313,7 @@ bool PRXDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) if (!(tab = GetStringCatInfo(g, "Tabname", NULL))) { if (!def) { strcpy(g->Message, "Missing object table definition"); - return TRUE; + return true; } else tab = "Noname"; @@ -327,7 +327,7 @@ bool PRXDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) Tablep = new(g) XTAB(tab, def); Tablep->SetQualifier(db); - return FALSE; + return false; } // end of DefineAM /***********************************************************************/ @@ -352,6 +352,28 @@ TDBPRX::TDBPRX(PPRXDEF tdp) : TDBASE(tdp) Tdbp = NULL; // The object table } // end of TDBPRX constructor +TDBPRX::TDBPRX(PGLOBAL g, PTDBPRX tdbp) : 
TDBASE(tdbp) + { + Tdbp = tdbp->Tdbp; + } // end of TDBPRX copy constructor + +// Method +PTDB TDBPRX::CopyOne(PTABS t) + { + PTDB tp; + PPRXCOL cp1, cp2; + PGLOBAL g = t->G; + + tp = new(g) TDBPRX(g, this); + + for (cp1 = (PPRXCOL)Columns; cp1; cp1 = (PPRXCOL)cp1->GetNext()) { + cp2 = new(g) PRXCOL(cp1, tp); // Make a copy + NewPointer(t, cp1, cp2); + } // endfor cp1 + + return tp; + } // end of CopyOne + /***********************************************************************/ /* Get the PTDB of the sub-table. */ /***********************************************************************/ @@ -403,7 +425,7 @@ PTDBASE TDBPRX::GetSubTable(PGLOBAL g, PTABLE tabp, bool b) if (mysql) { #if defined(MYSQL_SUPPORT) // Access sub-table via MySQL API - if (!(tdbp= cat->GetTable(g, tabp, MODE_READ, "MYPRX"))) { + if (!(tdbp= cat->GetTable(g, tabp, Mode, "MYPRX"))) { char buf[MAX_STR]; strcpy(buf, g->Message); @@ -415,6 +437,9 @@ PTDBASE TDBPRX::GetSubTable(PGLOBAL g, PTABLE tabp, bool b) if (db) ((PTDBMY)tdbp)->SetDatabase(tabp->GetQualifier()); + if (Mode == MODE_UPDATE || Mode == MODE_DELETE) + tdbp->SetName(Name); // For Make_Command + #else // !MYSQL_SUPPORT sprintf(g->Message, "%s.%s is not a CONNECT table", db, tblp->Name); @@ -423,7 +448,7 @@ PTDBASE TDBPRX::GetSubTable(PGLOBAL g, PTABLE tabp, bool b) } else { // Sub-table is a CONNECT table tabp->Next = To_Table; // For loop checking - tdbp = cat->GetTable(g, tabp); + tdbp = cat->GetTable(g, tabp, Mode); } // endif mysql if (s) { @@ -456,11 +481,12 @@ bool TDBPRX::InitTable(PGLOBAL g) if (!Tdbp) { // Get the table description block of this table if (!(Tdbp = GetSubTable(g, ((PPRXDEF)To_Def)->Tablep))) - return TRUE; + return true; +// Tdbp->SetMode(Mode); } // endif Tdbp - return FALSE; + return false; } // end of InitTable /***********************************************************************/ @@ -472,6 +498,21 @@ PCOL TDBPRX::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) } // end of MakeCol 
/***********************************************************************/ +/* PRX Cardinality: returns the number of rows in the table. */ +/***********************************************************************/ +int TDBPRX::Cardinality(PGLOBAL g) + { + if (Cardinal < 0) { + if (InitTable(g)) + return 0; + + Cardinal = Tdbp->Cardinality(g); + } // endif MaxSize + + return Cardinal; + } // end of GetMaxSize + +/***********************************************************************/ /* PRX GetMaxSize: returns the maximum number of rows in the table. */ /***********************************************************************/ int TDBPRX::GetMaxSize(PGLOBAL g) @@ -507,32 +548,49 @@ bool TDBPRX::OpenDB(PGLOBAL g) return Tdbp->OpenDB(g); } // endif use - if (Mode != MODE_READ) { - /*******************************************************************/ - /* Currently XCOL tables cannot be modified. */ - /*******************************************************************/ - strcpy(g->Message, "PROXY tables are read only"); - return TRUE; - } // endif Mode - if (InitTable(g)) - return TRUE; + return true; + else if (Mode != MODE_READ && (Read_Only || Tdbp->IsReadOnly())) { + strcpy(g->Message, "Cannot modify a read only table"); + return true; + } // endif tp /*********************************************************************/ /* Check and initialize the subtable columns. */ /*********************************************************************/ for (PCOL cp = Columns; cp; cp = cp->GetNext()) - if (((PPRXCOL)cp)->Init(g)) - return TRUE; + if (((PPRXCOL)cp)->Init(g, Tdbp)) + return true; + + /*********************************************************************/ + /* In Update mode, the updated column blocks must be distinct from */ + /* the read column blocks. So make a copy of the TDB and allocate */ + /* its column blocks in mode write (required by XML tables). 
*/ + /*********************************************************************/ + if (Mode == MODE_UPDATE) { + PTDBASE utp; + + if (!(utp= (PTDBASE)Tdbp->Duplicate(g))) { + sprintf(g->Message, MSG(INV_UPDT_TABLE), Tdbp->GetName()); + return true; + } // endif tp + + for (PCOL cp = To_SetCols; cp; cp = cp->GetNext()) + if (((PPRXCOL)cp)->Init(g, utp)) + return true; + + } else if (Mode == MODE_DELETE) + Tdbp->SetNext(Next); /*********************************************************************/ /* Physically open the object table. */ /*********************************************************************/ if (Tdbp->OpenDB(g)) - return TRUE; + return true; + Tdbp->SetNext(NULL); Use = USE_OPEN; - return FALSE; + return false; } // end of OpenDB /***********************************************************************/ @@ -551,8 +609,7 @@ int TDBPRX::ReadDB(PGLOBAL g) /***********************************************************************/ int TDBPRX::WriteDB(PGLOBAL g) { - sprintf(g->Message, "%s tables are read only", To_Def->GetType()); - return RC_FX; + return Tdbp->WriteDB(g); } // end of WriteDB /***********************************************************************/ @@ -560,9 +617,7 @@ int TDBPRX::WriteDB(PGLOBAL g) /***********************************************************************/ int TDBPRX::DeleteDB(PGLOBAL g, int irc) { - sprintf(g->Message, "Delete not enabled for %s tables", - To_Def->GetType()); - return RC_FX; + return Tdbp->DeleteDB(g, irc); } // end of DeleteDB /***********************************************************************/ @@ -594,7 +649,7 @@ PRXCOL::PRXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) //strcpy(F_Date, cdp->F_Date); Colp = NULL; To_Val = NULL; - Pseudo = FALSE; + Pseudo = false; Colnum = cdp->GetOffset(); // If columns are retrieved by number if (trace) @@ -603,29 +658,48 @@ PRXCOL::PRXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) } // end of PRXCOL constructor 
/***********************************************************************/ +/* PRXCOL constructor used for copying columns. */ +/* tdbp is the pointer to the new table descriptor. */ +/***********************************************************************/ +PRXCOL::PRXCOL(PRXCOL *col1, PTDB tdbp) : COLBLK(col1, tdbp) + { + Colp = col1->Colp; + To_Val = col1->To_Val; + Pseudo = col1->Pseudo; + Colnum = col1->Colnum; + } // end of PRXCOL copy constructor + +/***********************************************************************/ /* PRXCOL initialization routine. */ /* Look for the matching column in the object table. */ /***********************************************************************/ -bool PRXCOL::Init(PGLOBAL g) +bool PRXCOL::Init(PGLOBAL g, PTDBASE tp) { - PTDBPRX tdbp = (PTDBPRX)To_Tdb; + if (!tp) + tp = ((PTDBPRX)To_Tdb)->Tdbp; - if (!(Colp = tdbp->Tdbp->ColDB(g, Name, 0)) && Colnum) - Colp = tdbp->Tdbp->ColDB(g, NULL, Colnum); + if (!(Colp = tp->ColDB(g, Name, 0)) && Colnum) + Colp = tp->ColDB(g, NULL, Colnum); if (Colp) { + MODE mode = To_Tdb->GetMode(); + // May not have been done elsewhere Colp->InitValue(g); To_Val = Colp->GetValue(); + if (mode == MODE_INSERT || mode == MODE_UPDATE) + if (Colp->SetBuffer(g, Colp->GetValue(), true, false)) + return true; + // this may be needed by some tables (which?) 
Colp->SetColUse(ColUse); } else { - sprintf(g->Message, MSG(NO_MATCHING_COL), Name, tdbp->Tdbp->GetName()); - return TRUE; + sprintf(g->Message, MSG(NO_MATCHING_COL), Name, tp->GetName()); + return true; } // endif Colp - return FALSE; + return false; } // end of Init /***********************************************************************/ @@ -659,6 +733,21 @@ void PRXCOL::ReadColumn(PGLOBAL g) } // end of ReadColumn +/***********************************************************************/ +/* WriteColumn: */ +/***********************************************************************/ +void PRXCOL::WriteColumn(PGLOBAL g) + { + if (trace > 1) + htrc("PRX WriteColumn: name=%s\n", Name); + + if (Colp) { + To_Val->SetValue_pval(Value); + Colp->WriteColumn(g); + } // endif Colp + + } // end of WriteColumn + /* ---------------------------TDBTBC class --------------------------- */ /***********************************************************************/ diff --git a/storage/connect/tabutil.h b/storage/connect/tabutil.h index c87065befba..11f18be074a 100644 --- a/storage/connect/tabutil.h +++ b/storage/connect/tabutil.h @@ -57,13 +57,17 @@ class DllExport TDBPRX : public TDBASE { friend class PRXDEF; friend class PRXCOL; public: - // Constructor + // Constructors TDBPRX(PPRXDEF tdp); + TDBPRX(PGLOBAL g, PTDBPRX tdbp); // Implementation virtual AMT GetAmType(void) {return TYPE_AM_PRX;} + virtual PTDB Duplicate(PGLOBAL g) + {return (PTDB)new(g) TDBPRX(g, this);} // Methods + virtual PTDB CopyOne(PTABS t); virtual int GetRecpos(void) {return Tdbp->GetRecpos();} virtual void ResetDB(void) {Tdbp->ResetDB();} virtual int RowNumber(PGLOBAL g, bool b = FALSE); @@ -72,6 +76,7 @@ class DllExport TDBPRX : public TDBASE { // Database routines virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); virtual bool InitTable(PGLOBAL g); + virtual int Cardinality(PGLOBAL g); virtual int GetMaxSize(PGLOBAL g); virtual bool OpenDB(PGLOBAL g); virtual int ReadDB(PGLOBAL g); @@ -97,15 
+102,19 @@ class DllExport PRXCOL : public COLBLK { public: // Constructors PRXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "PRX"); + PRXCOL(PRXCOL *colp, PTDB tdbp); // Constructor used in copy process // Implementation - virtual int GetAmType(void) {return TYPE_AM_PRX;} + virtual int GetAmType(void) {return TYPE_AM_PRX;} // Methods - virtual void Reset(void); - virtual bool IsSpecial(void) {return Pseudo;} - virtual void ReadColumn(PGLOBAL g); - bool Init(PGLOBAL g); + virtual void Reset(void); + virtual bool IsSpecial(void) {return Pseudo;} + virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check) + {return false;} + virtual void ReadColumn(PGLOBAL g); + virtual void WriteColumn(PGLOBAL g); + bool Init(PGLOBAL g, PTDBASE tp = NULL); protected: // Default constructor not to be used diff --git a/storage/connect/tabvct.cpp b/storage/connect/tabvct.cpp index 73dfef6a4d6..043d3c2c496 100644 --- a/storage/connect/tabvct.cpp +++ b/storage/connect/tabvct.cpp @@ -32,7 +32,7 @@ /***********************************************************************/ /***********************************************************************/ -/* Include relevant MariaDB header file. */ +/* Include relevant MariaDB header file. */ /***********************************************************************/ #include "my_global.h" #if defined(WIN32) @@ -95,7 +95,10 @@ bool VCTDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) { DOSDEF::DefineAM(g, "BIN", poff); - Estimate = GetIntCatInfo("Estimate", 0); + if ((Estimate = GetIntCatInfo("Estimate", 0))) + Elemt = MY_MIN(Elemt, Estimate); + + // Split treated as INT to get default value Split = GetIntCatInfo("Split", (Estimate) ? 
0 : 1); Header = GetIntCatInfo("Header", 0); @@ -103,7 +106,7 @@ bool VCTDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) if (Estimate && !Split && !Header) { char *fn = GetStringCatInfo(g, "Filename", "?"); - // No separate header file fo urbi tables + // No separate header file for urbi tables Header = (*fn == '?') ? 3 : 2; } // endif Estimate @@ -302,14 +305,19 @@ bool TDBVCT::OpenDB(PGLOBAL g) To_Kindex->Reset(); Txfp->Rewind(); + ResetBlockFilter(g); return false; } // endif Use /*********************************************************************/ /* Delete all is not handled using file mapping. */ /*********************************************************************/ - if (Mode == MODE_DELETE && !Next && Txfp->GetAmType() == TYPE_AM_MAP) { - Txfp = new(g) VCTFAM((PVCTDEF)To_Def); + if (Mode == MODE_DELETE && !Next && Txfp->GetAmType() == TYPE_AM_VMP) { + if (IsSplit()) + Txfp = new(g) VECFAM((PVCTDEF)To_Def); + else + Txfp = new(g) VCTFAM((PVCTDEF)To_Def); + Txfp->SetTdbp(this); } // endif Mode @@ -324,6 +332,11 @@ bool TDBVCT::OpenDB(PGLOBAL g) Use = USE_OPEN; // Do it now in case we are recursively called /*********************************************************************/ + /* Allocate the block filter tree if evaluation is possible. */ + /*********************************************************************/ + To_BlkFil = InitBlockFilter(g, To_Filter); + + /*********************************************************************/ /* Reset buffer access according to indexing and to mode. 
*/ /*********************************************************************/ Txfp->ResetBuffer(g); @@ -382,7 +395,7 @@ void TDBVCT::CloseDB(PGLOBAL g) To_Kindex = NULL; } // endif - Txfp->CloseTableFile(g); + Txfp->CloseTableFile(g, false); } // end of CloseDB // ------------------------ VCTCOL functions ---------------------------- diff --git a/storage/connect/tabvct.h b/storage/connect/tabvct.h index 7dc416a5779..0a67a5e03b2 100644 --- a/storage/connect/tabvct.h +++ b/storage/connect/tabvct.h @@ -1,121 +1,123 @@ -/*************** TabVct H Declares Source Code File (.H) ***************/ -/* Name: TABVCT.H Version 3.4 */ -/* */ -/* (C) Copyright to the author Olivier BERTRAND 1999-2011 */ -/* */ -/* This file contains the TDBVCT class declares. */ -/***********************************************************************/ -#ifndef __TABVCT__ -#define __TABVCT__ - -#include "tabfix.h" -#if defined(UNIX) -//#include <string.h.SUNWCCh> -#endif - -typedef class TDBVCT *PTDBVCT; -typedef class VCTCOL *PVCTCOL; - -/***********************************************************************/ -/* VCT table. 
*/ -/***********************************************************************/ -class DllExport VCTDEF : public DOSDEF { /* Logical table description */ - friend class VCTFAM; - friend class VECFAM; - friend class VMPFAM; - public: - // Constructor - VCTDEF(void) {Split = Estimate = Header = 0;} - - // Implementation - virtual const char *GetType(void) {return "VCT";} - int GetEstimate(void) {return Estimate;} - - // Methods - virtual bool DefineAM(PGLOBAL g, LPCSTR am, int poff); - virtual PTDB GetTable(PGLOBAL g, MODE mode); - - protected: - int MakeFnPattern(char *fpat); - - // Members - int Split; /* Columns in separate files */ - int Estimate; /* Estimated maximum size of table */ - int Header; /* 0: no, 1: separate, 2: in data file */ - }; // end of VCTDEF - -/***********************************************************************/ -/* This is the DOS/UNIX Access Method class declaration for files */ -/* in blocked vector format. In each block containing "Elements" */ -/* records, values of each columns are consecutively stored (vector). */ -/***********************************************************************/ -class DllExport TDBVCT : public TDBFIX { - friend class VCTCOL; - friend class VCTFAM; - friend class VCMFAM; - friend class VECFAM; - friend class VMPFAM; - public: - // Constructors - TDBVCT(PVCTDEF tdp, PTXF txfp); - TDBVCT(PGLOBAL g, PTDBVCT tdbp); - - // Implementation - virtual AMT GetAmType(void) {return TYPE_AM_VCT;} - virtual PTDB Duplicate(PGLOBAL g) - {return (PTDB)new(g) TDBVCT(g, this);} - - // Methods - virtual PTDB CopyOne(PTABS t); - - // Database routines - virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); - virtual bool OpenDB(PGLOBAL g); - virtual int ReadDB(PGLOBAL g); - virtual void CloseDB(PGLOBAL g); - - protected: - // Members - }; // end of class TDBVCT - -/***********************************************************************/ -/* Class VCTCOL: VCT access method column descriptor. */ -/* This A.M. 
is used for file having column wise organization. */ -/***********************************************************************/ -class DllExport VCTCOL : public DOSCOL { - friend class TDBVCT; - friend class VCTFAM; - friend class VCMFAM; - friend class VECFAM; - friend class VMPFAM; - friend class BGVFAM; - public: - // Constructors - VCTCOL(PGLOBAL g, PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i); - VCTCOL(VCTCOL *colp, PTDB tdbp); // Constructor used in copy process - - // Implementation - virtual int GetAmType(void) {return TYPE_AM_VCT;} - - // Methods - virtual void ReadColumn(PGLOBAL g); - virtual void WriteColumn(PGLOBAL g); - virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check); - virtual void SetOk(void); - - protected: - virtual void ReadBlock(PGLOBAL g); - virtual void WriteBlock(PGLOBAL g); - - VCTCOL(void) {} // Default constructor not to be used - - // Members - PVBLK Blk; // Block buffer - int Clen; // Internal length in table - int ColBlk; // Block pointed by column - int ColPos; // Last position read - int Modif; // Number of modified lines in block - }; // end of class VCTCOL - -#endif // __TABVCT__ - +/*************** TabVct H Declares Source Code File (.H) ***************/
+/* Name: TABVCT.H Version 3.4 */
+/* */
+/* (C) Copyright to the author Olivier BERTRAND 1999-2011 */
+/* */
+/* This file contains the TDBVCT class declares. */
+/***********************************************************************/
+#ifndef __TABVCT__
+#define __TABVCT__
+
+#include "tabfix.h"
+#if defined(UNIX)
+//#include <string.h.SUNWCCh>
+#endif
+
+typedef class TDBVCT *PTDBVCT;
+typedef class VCTCOL *PVCTCOL;
+
+/***********************************************************************/
+/* VCT table. */
+/***********************************************************************/
+class DllExport VCTDEF : public DOSDEF { /* Logical table description */
+ friend class TDBVCT;
+ friend class VCTFAM;
+ friend class VECFAM;
+ friend class VMPFAM;
+ public:
+ // Constructor
+ VCTDEF(void) {Split = false; Estimate = Header = 0;}
+
+ // Implementation
+ virtual const char *GetType(void) {return "VCT";}
+ int GetEstimate(void) {return Estimate;}
+
+ // Methods
+ virtual bool DefineAM(PGLOBAL g, LPCSTR am, int poff);
+ virtual PTDB GetTable(PGLOBAL g, MODE mode);
+
+ protected:
+ int MakeFnPattern(char *fpat);
+
+ // Members
+ bool Split; /* Columns in separate files */
+ int Estimate; /* Estimated maximum size of table */
+ int Header; /* 0: no, 1: separate, 2: in data file */
+ }; // end of VCTDEF
+
+/***********************************************************************/
+/* This is the DOS/UNIX Access Method class declaration for files */
+/* in blocked vector format. In each block containing "Elements" */
+/* records, values of each columns are consecutively stored (vector). */
+/***********************************************************************/
+class DllExport TDBVCT : public TDBFIX {
+ friend class VCTCOL;
+ friend class VCTFAM;
+ friend class VCMFAM;
+ friend class VECFAM;
+ friend class VMPFAM;
+ public:
+ // Constructors
+ TDBVCT(PVCTDEF tdp, PTXF txfp);
+ TDBVCT(PGLOBAL g, PTDBVCT tdbp);
+
+ // Implementation
+ virtual AMT GetAmType(void) {return TYPE_AM_VCT;}
+ virtual PTDB Duplicate(PGLOBAL g)
+ {return (PTDB)new(g) TDBVCT(g, this);}
+ bool IsSplit(void) {return ((VCTDEF*)To_Def)->Split;}
+
+ // Methods
+ virtual PTDB CopyOne(PTABS t);
+
+ // Database routines
+ virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
+ virtual bool OpenDB(PGLOBAL g);
+ virtual int ReadDB(PGLOBAL g);
+ virtual void CloseDB(PGLOBAL g);
+
+ protected:
+ // Members
+ }; // end of class TDBVCT
+
+/***********************************************************************/
+/* Class VCTCOL: VCT access method column descriptor. */
+/* This A.M. is used for file having column wise organization. */
+/***********************************************************************/
+class DllExport VCTCOL : public DOSCOL {
+ friend class TDBVCT;
+ friend class VCTFAM;
+ friend class VCMFAM;
+ friend class VECFAM;
+ friend class VMPFAM;
+ friend class BGVFAM;
+ public:
+ // Constructors
+ VCTCOL(PGLOBAL g, PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i);
+ VCTCOL(VCTCOL *colp, PTDB tdbp); // Constructor used in copy process
+
+ // Implementation
+ virtual int GetAmType(void) {return TYPE_AM_VCT;}
+
+ // Methods
+ virtual void ReadColumn(PGLOBAL g);
+ virtual void WriteColumn(PGLOBAL g);
+ virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check);
+ virtual void SetOk(void);
+
+ protected:
+ virtual void ReadBlock(PGLOBAL g);
+ virtual void WriteBlock(PGLOBAL g);
+
+ VCTCOL(void) {} // Default constructor not to be used
+
+ // Members
+ PVBLK Blk; // Block buffer
+ int Clen; // Internal length in table
+ int ColBlk; // Block pointed by column
+ int ColPos; // Last position read
+ int Modif; // Number of modified lines in block
+ }; // end of class VCTCOL
+
+#endif // __TABVCT__
+
diff --git a/storage/connect/tabwmi.h b/storage/connect/tabwmi.h index 8ff5262941e..6f25c0de258 100644 --- a/storage/connect/tabwmi.h +++ b/storage/connect/tabwmi.h @@ -1,150 +1,151 @@ -// TABWMI.H Olivier Bertrand 2012 -// WMI: Virtual table to Get WMI information -#define _WIN32_DCOM -#include <wbemidl.h> -# pragma comment(lib, "wbemuuid.lib") -#include <iostream> -using namespace std; -#include <comdef.h> - -/***********************************************************************/ -/* Definitions. */ -/***********************************************************************/ -typedef class WMIDEF *PWMIDEF; -typedef class TDBWMI *PTDBWMI; -typedef class WMICOL *PWMICOL; -typedef class TDBWCL *PTDBWCL; -typedef class WCLCOL *PWCLCOL; - -/***********************************************************************/ -/* Structure used by WMI column info functions. */ -/***********************************************************************/ -typedef struct _WMIutil { - IWbemServices *Svc; - IWbemClassObject *Cobj; -} WMIUTIL, *PWMIUT; - -/***********************************************************************/ -/* Functions used externally. */ -/***********************************************************************/ -PQRYRES WMIColumns(PGLOBAL g, char *nsp, char *cls, bool info); - -/* -------------------------- WMI classes ---------------------------- */ - -/***********************************************************************/ -/* WMI: Virtual table to get the WMI information. 
*/ -/***********************************************************************/ -class WMIDEF : public TABDEF { /* Logical table description */ - friend class TDBWMI; - friend class TDBWCL; - friend class TDBWCX; - public: - // Constructor - WMIDEF(void) {Pseudo = 3; Nspace = NULL; Wclass = NULL; Ems = 0;} - - // Implementation - virtual const char *GetType(void) {return "WMI";} - - // Methods - virtual bool DefineAM(PGLOBAL g, LPCSTR am, int poff); - virtual PTDB GetTable(PGLOBAL g, MODE m); - - protected: - // Members - char *Nspace; - char *Wclass; - int Ems; - }; // end of WMIDEF - -/***********************************************************************/ -/* This is the class declaration for the WMI table. */ -/***********************************************************************/ -class TDBWMI : public TDBASE { - friend class WMICOL; - public: - // Constructor - TDBWMI(PWMIDEF tdp); - - // Implementation - virtual AMT GetAmType(void) {return TYPE_AM_WMI;} - - // Methods - virtual int GetRecpos(void); - virtual int GetProgCur(void) {return N;} - virtual int RowNumber(PGLOBAL g, bool b = false) {return N + 1;} - - // Database routines - virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); - virtual int GetMaxSize(PGLOBAL g); - virtual bool OpenDB(PGLOBAL g); - virtual int ReadDB(PGLOBAL g); - virtual int WriteDB(PGLOBAL g); - virtual int DeleteDB(PGLOBAL g, int irc); - virtual void CloseDB(PGLOBAL g); - - protected: - // Specific routines - bool Initialize(PGLOBAL g); - char *MakeWQL(PGLOBAL g); - void DoubleSlash(PGLOBAL g); - bool GetWMIInfo(PGLOBAL g); - - // Members - IWbemServices *Svc; // IWbemServices pointer - IEnumWbemClassObject *Enumerator; - IWbemClassObject *ClsObj; - char *Nspace; // Namespace - char *Wclass; // Class name - char *ObjPath; // Used for direct access - char *Kvp; // Itou - int Ems; // Estimated max size - PCOL Kcol; // Key column - HRESULT Res; - PVBLK Vbp; - bool Init; - bool Done; - ULONG Rc; - int N; // Row number - 
}; // end of class TDBWMI - -/***********************************************************************/ -/* Class WMICOL: WMI Address column. */ -/***********************************************************************/ -class WMICOL : public COLBLK { - friend class TDBWMI; - public: - // Constructors - WMICOL(PCOLDEF cdp, PTDB tdbp, int n); - - // Implementation - virtual int GetAmType(void) {return TYPE_AM_WMI;} - - // Methods - virtual void ReadColumn(PGLOBAL g); - - protected: - WMICOL(void) {} // Default constructor not to be used - - // Members - PTDBWMI Tdbp; // Points to WMI table block - VARIANT Prop; // Property value - CIMTYPE Ctype; // CIM Type - HRESULT Res; - }; // end of class WMICOL - -/***********************************************************************/ -/* This is the class declaration for the WMI catalog table. */ -/***********************************************************************/ -class TDBWCL : public TDBCAT { - public: - // Constructor - TDBWCL(PWMIDEF tdp); - - protected: - // Specific routines - virtual PQRYRES GetResult(PGLOBAL g); - - // Members - char *Nsp; // Name space - char *Cls; // Class - }; // end of class TDBWCL +// TABWMI.H Olivier Bertrand 2012
+// WMI: Virtual table to Get WMI information
+#define _WIN32_DCOM
+#include <wbemidl.h>
+# pragma comment(lib, "wbemuuid.lib")
+#include <iostream>
+using namespace std;
+#include <comdef.h>
+
+/***********************************************************************/
+/* Definitions. */
+/***********************************************************************/
+typedef class WMIDEF *PWMIDEF;
+typedef class TDBWMI *PTDBWMI;
+typedef class WMICOL *PWMICOL;
+typedef class TDBWCL *PTDBWCL;
+typedef class WCLCOL *PWCLCOL;
+
+/***********************************************************************/
+/* Structure used by WMI column info functions. */
+/***********************************************************************/
+typedef struct _WMIutil {
+ IWbemServices *Svc;
+ IWbemClassObject *Cobj;
+} WMIUTIL, *PWMIUT;
+
+/***********************************************************************/
+/* Functions used externally. */
+/***********************************************************************/
+PQRYRES WMIColumns(PGLOBAL g, char *nsp, char *cls, bool info);
+
+/* -------------------------- WMI classes ---------------------------- */
+
+/***********************************************************************/
+/* WMI: Virtual table to get the WMI information. */
+/***********************************************************************/
+class WMIDEF : public TABDEF { /* Logical table description */
+ friend class TDBWMI;
+ friend class TDBWCL;
+ friend class TDBWCX;
+ public:
+ // Constructor
+ WMIDEF(void) {Pseudo = 3; Nspace = NULL; Wclass = NULL; Ems = 0;}
+
+ // Implementation
+ virtual const char *GetType(void) {return "WMI";}
+
+ // Methods
+ virtual bool DefineAM(PGLOBAL g, LPCSTR am, int poff);
+ virtual PTDB GetTable(PGLOBAL g, MODE m);
+
+ protected:
+ // Members
+ char *Nspace;
+ char *Wclass;
+ int Ems;
+ }; // end of WMIDEF
+
+/***********************************************************************/
+/* This is the class declaration for the WMI table. */
+/***********************************************************************/
+class TDBWMI : public TDBASE {
+ friend class WMICOL;
+ public:
+ // Constructor
+ TDBWMI(PWMIDEF tdp);
+
+ // Implementation
+ virtual AMT GetAmType(void) {return TYPE_AM_WMI;}
+
+ // Methods
+ virtual int GetRecpos(void);
+ virtual int GetProgCur(void) {return N;}
+ virtual int RowNumber(PGLOBAL g, bool b = false) {return N + 1;}
+
+ // Database routines
+ virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
+ virtual int Cardinality(PGLOBAL g) {return GetMaxSize(g);} + virtual int GetMaxSize(PGLOBAL g);
+ virtual bool OpenDB(PGLOBAL g);
+ virtual int ReadDB(PGLOBAL g);
+ virtual int WriteDB(PGLOBAL g);
+ virtual int DeleteDB(PGLOBAL g, int irc);
+ virtual void CloseDB(PGLOBAL g);
+
+ protected:
+ // Specific routines
+ bool Initialize(PGLOBAL g);
+ char *MakeWQL(PGLOBAL g);
+ void DoubleSlash(PGLOBAL g);
+ bool GetWMIInfo(PGLOBAL g);
+
+ // Members
+ IWbemServices *Svc; // IWbemServices pointer
+ IEnumWbemClassObject *Enumerator;
+ IWbemClassObject *ClsObj;
+ char *Nspace; // Namespace
+ char *Wclass; // Class name
+ char *ObjPath; // Used for direct access
+ char *Kvp; // Itou
+ int Ems; // Estimated max size
+ PCOL Kcol; // Key column
+ HRESULT Res;
+ PVBLK Vbp;
+ bool Init;
+ bool Done;
+ ULONG Rc;
+ int N; // Row number
+ }; // end of class TDBWMI
+
+/***********************************************************************/
+/* Class WMICOL: WMI Address column. */
+/***********************************************************************/
+class WMICOL : public COLBLK {
+ friend class TDBWMI;
+ public:
+ // Constructors
+ WMICOL(PCOLDEF cdp, PTDB tdbp, int n);
+
+ // Implementation
+ virtual int GetAmType(void) {return TYPE_AM_WMI;}
+
+ // Methods
+ virtual void ReadColumn(PGLOBAL g);
+
+ protected:
+ WMICOL(void) {} // Default constructor not to be used
+
+ // Members
+ PTDBWMI Tdbp; // Points to WMI table block
+ VARIANT Prop; // Property value
+ CIMTYPE Ctype; // CIM Type
+ HRESULT Res;
+ }; // end of class WMICOL
+
+/***********************************************************************/
+/* This is the class declaration for the WMI catalog table. */
+/***********************************************************************/
+class TDBWCL : public TDBCAT {
+ public:
+ // Constructor
+ TDBWCL(PWMIDEF tdp);
+
+ protected:
+ // Specific routines
+ virtual PQRYRES GetResult(PGLOBAL g);
+
+ // Members
+ char *Nsp; // Name space
+ char *Cls; // Class
+ }; // end of class TDBWCL
diff --git a/storage/connect/tabxml.cpp b/storage/connect/tabxml.cpp index 1e9c172cdb3..88c029aefd2 100644 --- a/storage/connect/tabxml.cpp +++ b/storage/connect/tabxml.cpp @@ -366,7 +366,7 @@ int TDBXML::LoadTableFile(PGLOBAL g, char *filename) /*********************************************************************/ /* Firstly we check whether this file have been already loaded. */ /*********************************************************************/ - if (Mode == MODE_READ) + if (Mode == MODE_READ || Mode == MODE_ANY) for (fp = dup->Openlist; fp; fp = fp->Next) if (fp->Type == type && fp->Length && fp->Count) if (!stricmp(fp->Fname, filename)) @@ -522,8 +522,8 @@ bool TDBXML::Initialize(PGLOBAL g) To_Xb = Docp->LinkXblock(g, Mode, rc, filename); // Add a CONNECT comment node -// sprintf(buf, MSG(CREATED_PLUGDB), version); - sprintf(buf, " Created by CONNECT %s ", version); +// sprintf(buf, " Created by CONNECT %s ", version); + strcpy(buf, " Created by the MariaDB CONNECT Storage Engine"); Docp->AddComment(g, buf); if (XmlDB) { diff --git a/storage/connect/tabxml.h b/storage/connect/tabxml.h index 23c46c03a8b..a3dc0a2b54c 100644 --- a/storage/connect/tabxml.h +++ b/storage/connect/tabxml.h @@ -1,4 +1,3 @@ - /*************** Tabxml H Declares Source Code File (.H) ***************/ /* Name: TABXML.H Version 1.6 */ /* */ diff --git a/storage/connect/user_connect.cc b/storage/connect/user_connect.cc index de32be8cdb3..b5f835c9cc9 100644 --- a/storage/connect/user_connect.cc +++ b/storage/connect/user_connect.cc @@ -1,4 +1,4 @@ -/* Copyright (C) Olivier Bertrand 2004 - 2012 +/* Copyright (C) Olivier Bertrand 2004 - 2014 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -68,7 +68,7 @@ user_connect::user_connect(THD *thd, const char *dbn) g= NULL; last_query_id= 0; count= 0; - + // Statistics nrd= fnd= nfd= 0; tb1= 0; @@ -95,9 +95,9 @@ bool user_connect::user_init() PDBUSER dup= 
NULL; // Areasize= 64M because of VEC tables. Should be parameterisable -//g= PlugInit(NULL, 67108864); -//g= PlugInit(NULL, 134217728); // 128M was because of old embedded tests - g= PlugInit(NULL, worksize); +//g= PlugInit(NULL, 67108864); +//g= PlugInit(NULL, 134217728); // 128M was because of old embedded tests + g= PlugInit(NULL, worksize); // Check whether the initialization is complete if (!g || !g->Sarea || PlugSubSet(g, g->Sarea, g->Sarea_Size) @@ -162,6 +162,7 @@ bool user_connect::CheckCleanup(void) g->Xchk = NULL; g->Createas = 0; g->Alchecked = 0; + g->Mrr = 0; last_query_id= thdp->query_id; if (trace) diff --git a/storage/connect/valblk.cpp b/storage/connect/valblk.cpp index ded7240cb2e..e435a49cbd2 100644 --- a/storage/connect/valblk.cpp +++ b/storage/connect/valblk.cpp @@ -101,6 +101,9 @@ PVBLK AllocValBlock(PGLOBAL g, void *mp, int type, int nval, int len, blkp = new(g) TYPBLK<char>(mp, nval, type); break; + case TYPE_PCHAR: + blkp = new(g) PTRBLK(g, mp, nval); + break; default: sprintf(g->Message, MSG(BAD_VALBLK_TYPE), type); return NULL; @@ -468,6 +471,36 @@ template <> uchar TYPBLK<uchar>::GetTypedValue(PVBLK blk, int n) {return blk->GetUTinyValue(n);} +/***********************************************************************/ +/* Set one value in a block if val is less than the current value. */ +/***********************************************************************/ +template <class TYPE> +void TYPBLK<TYPE>::SetMin(PVAL valp, int n) + { + CheckParms(valp, n) + TYPE tval = GetTypedValue(valp); + TYPE& tmin = Typp[n]; + + if (tval < tmin) + tmin = tval; + + } // end of SetMin + +/***********************************************************************/ +/* Set one value in a block if val is greater than the current value. 
*/ +/***********************************************************************/ +template <class TYPE> +void TYPBLK<TYPE>::SetMax(PVAL valp, int n) + { + CheckParms(valp, n) + TYPE tval = GetTypedValue(valp); + TYPE& tmin = Typp[n]; + + if (tval > tmin) + tmin = tval; + + } // end of SetMax + #if 0 /***********************************************************************/ /* Set many values in a block from values in another block. */ @@ -802,6 +835,36 @@ void CHRBLK::SetValue(PVBLK pv, int n1, int n2) SetNull(n1, b); } // end of SetValue +/***********************************************************************/ +/* Set one value in a block if val is less than the current value. */ +/***********************************************************************/ +void CHRBLK::SetMin(PVAL valp, int n) + { + CheckParms(valp, n) + CheckBlanks + char *vp = valp->GetCharValue(); + char *bp = Chrp + n * Long; + + if (((Ci) ? strnicmp(vp, bp, Long) : strncmp(vp, bp, Long)) < 0) + memcpy(bp, vp, Long); + + } // end of SetMin + +/***********************************************************************/ +/* Set one value in a block if val is greater than the current value. */ +/***********************************************************************/ +void CHRBLK::SetMax(PVAL valp, int n) + { + CheckParms(valp, n) + CheckBlanks + char *vp = valp->GetCharValue(); + char *bp = Chrp + n * Long; + + if (((Ci) ? strnicmp(vp, bp, Long) : strncmp(vp, bp, Long)) > 0) + memcpy(bp, vp, Long); + + } // end of SetMax + #if 0 /***********************************************************************/ /* Set many values in a block from values in another block. */ @@ -1127,6 +1190,34 @@ void STRBLK::SetValue(char *sp, uint len, int n) } // end of SetValue /***********************************************************************/ +/* Set one value in a block if val is less than the current value. 
*/ +/***********************************************************************/ +void STRBLK::SetMin(PVAL valp, int n) + { + CheckParms(valp, n) + char *vp = valp->GetCharValue(); + char *bp = Strp[n]; + + if (strcmp(vp, bp) < 0) + SetValue(valp, n); + + } // end of SetMin + +/***********************************************************************/ +/* Set one value in a block if val is greater than the current value. */ +/***********************************************************************/ +void STRBLK::SetMax(PVAL valp, int n) + { + CheckParms(valp, n) + char *vp = valp->GetCharValue(); + char *bp = Strp[n]; + + if (strcmp(vp, bp) > 0) + SetValue(valp, n); + + } // end of SetMax + +/***********************************************************************/ /* Move one value from i to j. */ /***********************************************************************/ void STRBLK::Move(int i, int j) @@ -1265,5 +1356,61 @@ void DATBLK::SetValue(PSZ p, int n) } // end of SetValue + +/* -------------------------- Class PTRBLK --------------------------- */ + +/***********************************************************************/ +/* Compare two values of the block. */ +/***********************************************************************/ +int PTRBLK::CompVal(int i1, int i2) + { + return (Strp[i1] > Strp[i2]) ? 1 : (Strp[i1] < Strp[i2]) ? (-1) : 0; + } // end of CompVal + + +/* -------------------------- Class MBVALS --------------------------- */ + +/***********************************************************************/ +/* Allocate a value block according to type,len, and nb of values. 
*/ +/***********************************************************************/ +PVBLK MBVALS::Allocate(PGLOBAL g, int type, int len, int prec, + int n, bool sub) + { + Mblk.Sub = sub; + Mblk.Size = n * GetTypeSize(type, len); + + if (!PlgDBalloc(g, NULL, Mblk)) { + sprintf(g->Message, MSG(ALLOC_ERROR), "MBVALS::Allocate"); + return NULL; + } else + Vblk = AllocValBlock(g, Mblk.Memp, type, n, len, prec, + TRUE, TRUE, FALSE); + + return Vblk; + } // end of Allocate + +/***********************************************************************/ +/* Reallocate the value block according to the new size. */ +/***********************************************************************/ +bool MBVALS::ReAllocate(PGLOBAL g, int n) + { + if (!PlgDBrealloc(g, NULL, Mblk, n * Vblk->GetVlen())) { + sprintf(g->Message, MSG(ALLOC_ERROR), "MBVALS::ReAllocate"); + return TRUE; + } else + Vblk->ReAlloc(Mblk.Memp, n); + + return FALSE; + } // end of ReAllocate + +/***********************************************************************/ +/* Free the value block. */ +/***********************************************************************/ +void MBVALS::Free(void) + { + PlgDBfree(Mblk); + Vblk = NULL; + } // end of Free + /* ------------------------- End of Valblk --------------------------- */ diff --git a/storage/connect/valblk.h b/storage/connect/valblk.h index a9b1debe098..654db0b57b7 100644 --- a/storage/connect/valblk.h +++ b/storage/connect/valblk.h @@ -18,11 +18,41 @@ /***********************************************************************/ /* Utility used to allocate value blocks. */ /***********************************************************************/ -DllExport PVBLK AllocValBlock(PGLOBAL, void*, int, int, int, int, +DllExport PVBLK AllocValBlock(PGLOBAL, void*, int, int, int, int, bool, bool, bool); const char *GetFmt(int type, bool un = false); /***********************************************************************/ +/* DB static external variables. 
*/ +/***********************************************************************/ +extern MBLOCK Nmblk; /* Used to initialize MBLOCK's */ + +/***********************************************************************/ +/* Class MBVALS is a utility class for (re)allocating VALBLK's. */ +/***********************************************************************/ +class MBVALS : public BLOCK { +//friend class LSTBLK; + friend class ARRAY; + public: + // Constructors + MBVALS(void) {Vblk = NULL; Mblk = Nmblk;} + + // Methods + void *GetMemp(void) {return Mblk.Memp;} + PVBLK Allocate(PGLOBAL g, int type, int len, int prec, + int n, bool sub = FALSE); + bool ReAllocate(PGLOBAL g, int n); + void Free(void); + + protected: + // Members + PVBLK Vblk; // Pointer to VALBLK + MBLOCK Mblk; // The memory block + }; // end of class MBVALS + +typedef class MBVALS *PMBV; + +/***********************************************************************/ /* Class VALBLK represent a base class for variable blocks. */ /***********************************************************************/ class VALBLK : public BLOCK { @@ -78,6 +108,8 @@ class VALBLK : public BLOCK { virtual void SetValue(char *sp, uint len, int n) {assert(false);} virtual void SetValue(PVAL valp, int n) = 0; virtual void SetValue(PVBLK pv, int n1, int n2) = 0; + virtual void SetMin(PVAL valp, int n) = 0; + virtual void SetMax(PVAL valp, int n) = 0; virtual void Move(int i, int j) = 0; virtual int CompVal(PVAL vp, int n) = 0; virtual int CompVal(int i1, int i2) = 0; @@ -153,6 +185,8 @@ class TYPBLK : public VALBLK { {Typp[n] = (TYPE)cval; SetNull(n, false);} virtual void SetValue(PVAL valp, int n); virtual void SetValue(PVBLK pv, int n1, int n2); + virtual void SetMin(PVAL valp, int n); + virtual void SetMax(PVAL valp, int n); virtual void Move(int i, int j); virtual int CompVal(PVAL vp, int n); virtual int CompVal(int i1, int i2); @@ -203,6 +237,8 @@ class CHRBLK : public VALBLK { virtual void SetValue(char *sp, uint len, int n); 
virtual void SetValue(PVAL valp, int n); virtual void SetValue(PVBLK pv, int n1, int n2); + virtual void SetMin(PVAL valp, int n); + virtual void SetMax(PVAL valp, int n); virtual void Move(int i, int j); virtual int CompVal(PVAL vp, int n); virtual int CompVal(int i1, int i2); @@ -213,11 +249,11 @@ class CHRBLK : public VALBLK { protected: // Members - char* const &Chrp; // Pointer to char buffer - PSZ Valp; // Used to make a zero ended value - bool Blanks; // True for right filling with blanks - bool Ci; // True if case insensitive - int Long; // Length of each string + char* const &Chrp; // Pointer to char buffer + PSZ Valp; // Used to make a zero ended value + bool Blanks; // True for right filling with blanks + bool Ci; // True if case insensitive + int Long; // Length of each string }; // end of class CHRBLK /***********************************************************************/ @@ -254,6 +290,8 @@ class STRBLK : public VALBLK { virtual void SetValue(char *sp, uint len, int n); virtual void SetValue(PVAL valp, int n); virtual void SetValue(PVBLK pv, int n1, int n2); + virtual void SetMin(PVAL valp, int n); + virtual void SetMax(PVAL valp, int n); virtual void Move(int i, int j); virtual int CompVal(PVAL vp, int n); virtual int CompVal(int i1, int i2); @@ -291,5 +329,28 @@ class DATBLK : public TYPBLK<int> { PVAL Dvalp; // Date value used to convert string }; // end of class DATBLK +/***********************************************************************/ +/* Class PTRBLK: represent a block of char pointers. */ +/* Currently this class is used only by the ARRAY class to make and */ +/* sort a list of char pointers. 
*/ +/***********************************************************************/ +class PTRBLK : public STRBLK { + friend class ARRAY; + friend PVBLK AllocValBlock(PGLOBAL, void *, int, int, int, int, + bool, bool, bool); + protected: + // Constructors + PTRBLK(PGLOBAL g, void *mp, int size) : STRBLK(g, mp, size) {} + + // Implementation + + // Methods + virtual void SetValue(PSZ p, int n) {Strp[n] = p;} + virtual int CompVal(int i1, int i2); + + protected: + // Members + }; // end of class PTRBLK + #endif // __VALBLK__H__ diff --git a/storage/connect/value.cpp b/storage/connect/value.cpp index 12b6aced1cd..4c1c36369ef 100644 --- a/storage/connect/value.cpp +++ b/storage/connect/value.cpp @@ -92,6 +92,32 @@ PSZ strlwr(PSZ s); #endif // !WIN32 /***********************************************************************/ +/* Returns the bitmap representing the conditions that must not be */ +/* met when returning from TestValue for a given operator. */ +/* Bit one is EQ, bit 2 is LT, and bit 3 is GT. */ +/***********************************************************************/ +BYTE OpBmp(PGLOBAL g, OPVAL opc) + { + BYTE bt; + + switch (opc) { + case OP_IN: + case OP_EQ: bt = 0x06; break; + case OP_NE: bt = 0x01; break; + case OP_GT: bt = 0x03; break; + case OP_GE: bt = 0x02; break; + case OP_LT: bt = 0x05; break; + case OP_LE: bt = 0x04; break; + case OP_EXIST: bt = 0x00; break; + default: + sprintf(g->Message, MSG(BAD_FILTER_OP), opc); + longjmp(g->jumper[g->jump_level], TYPE_ARRAY); + } // endswitch opc + + return bt; + } // end of OpBmp + +/***********************************************************************/ /* Get a long long number from its character representation. 
*/ /* IN p: Pointer to the numeric string */ /* IN n: The string length */ @@ -101,7 +127,7 @@ PSZ strlwr(PSZ s); /* OUT minus: Set to true if the number is negative */ /* Returned val: The resulting number */ /***********************************************************************/ -ulonglong CharToNumber(char *p, int n, ulonglong maxval, +ulonglong CharToNumber(char *p, int n, ulonglong maxval, bool un, bool *minus, bool *rc) { char *p2; @@ -110,7 +136,7 @@ ulonglong CharToNumber(char *p, int n, ulonglong maxval, if (minus) *minus = false; if (rc) *rc = false; - + // Eliminate leading blanks or 0 for (p2 = p + n; p < p2 && (*p == ' ' || *p == '0'); p++) ; @@ -158,6 +184,7 @@ PSZ GetTypeName(int type) case TYPE_TINY: name = "TINY"; break; case TYPE_DECIM: name = "DECIMAL"; break; case TYPE_BIN: name = "BINARY"; break; + case TYPE_PCHAR: name = "PCHAR"; break; default: name = "UNKNOWN"; break; } // endswitch type @@ -179,6 +206,7 @@ int GetTypeSize(int type, int len) case TYPE_DATE: len = sizeof(int); break; case TYPE_DOUBLE: len = sizeof(double); break; case TYPE_TINY: len = sizeof(char); break; + case TYPE_PCHAR: len = sizeof(char*); break; default: len = 0; } // endswitch type @@ -202,6 +230,7 @@ char *GetFormatType(int type) case TYPE_TINY: c = "T"; break; case TYPE_DECIM: c = "M"; break; case TYPE_BIN: c = "B"; break; + case TYPE_PCHAR: c = "P"; break; } // endswitch type return c; @@ -224,6 +253,7 @@ int GetFormatType(char c) case 'T': type = TYPE_TINY; break; case 'M': type = TYPE_DECIM; break; case 'B': type = TYPE_BIN; break; + case 'P': type = TYPE_PCHAR; break; } // endswitch type return type; @@ -284,6 +314,53 @@ const char *GetFmt(int type, bool un) } // end of GetFmt /***********************************************************************/ +/* ConvertType: what this function does is to determine the type to */ +/* which should be converted a value so no precision would be lost. */ +/* This can be a numeric type if num is true or non numeric if false. 
*/ +/* Note: this is an ultra simplified version of this function that */ +/* should become more and more complex as new types are added. */ +/* Not evaluated types (TYPE_VOID or TYPE_UNDEF) return false from */ +/* IsType... functions so match does not prevent correct setting. */ +/***********************************************************************/ +int ConvertType(int target, int type, CONV kind, bool match) + { + switch (kind) { + case CNV_CHAR: + if (match && (!IsTypeChar(target) || !IsTypeChar(type))) + return TYPE_ERROR; + + return TYPE_STRING; + case CNV_NUM: + if (match && (!IsTypeNum(target) || !IsTypeNum(type))) + return TYPE_ERROR; + + return (target == TYPE_DOUBLE || type == TYPE_DOUBLE) ? TYPE_DOUBLE + : (target == TYPE_DATE || type == TYPE_DATE) ? TYPE_DATE + : (target == TYPE_BIGINT || type == TYPE_BIGINT) ? TYPE_BIGINT + : (target == TYPE_INT || type == TYPE_INT) ? TYPE_INT + : (target == TYPE_SHORT || type == TYPE_SHORT) ? TYPE_SHORT + : TYPE_TINY; + default: + if (target == TYPE_ERROR || target == type) + return type; + + if (match && ((IsTypeChar(target) && !IsTypeChar(type)) || + (IsTypeNum(target) && !IsTypeNum(type)))) + return TYPE_ERROR; + + return (target == TYPE_DOUBLE || type == TYPE_DOUBLE) ? TYPE_DOUBLE + : (target == TYPE_DATE || type == TYPE_DATE) ? TYPE_DATE + : (target == TYPE_BIGINT || type == TYPE_BIGINT) ? TYPE_BIGINT + : (target == TYPE_INT || type == TYPE_INT) ? TYPE_INT + : (target == TYPE_SHORT || type == TYPE_SHORT) ? TYPE_SHORT + : (target == TYPE_STRING || type == TYPE_STRING) ? TYPE_STRING + : (target == TYPE_TINY || type == TYPE_TINY) ? TYPE_TINY + : TYPE_ERROR; + } // endswitch kind + + } // end of ConvertType + +/***********************************************************************/ /* AllocateConstant: allocates a constant Value. 
*/ /***********************************************************************/ PVAL AllocateValue(PGLOBAL g, void *value, short type) @@ -300,7 +377,7 @@ PVAL AllocateValue(PGLOBAL g, void *value, short type) case TYPE_SHORT: valp = new(g) TYPVAL<short>(*(short*)value, TYPE_SHORT); break; - case TYPE_INT: + case TYPE_INT: valp = new(g) TYPVAL<int>(*(int*)value, TYPE_INT); break; case TYPE_BIGINT: @@ -333,10 +410,10 @@ PVAL AllocateValue(PGLOBAL g, int type, int len, int prec, case TYPE_STRING: valp = new(g) TYPVAL<PSZ>(g, (PSZ)NULL, len, prec); break; - case TYPE_DATE: + case TYPE_DATE: valp = new(g) DTVAL(g, len, prec, fmt); break; - case TYPE_INT: + case TYPE_INT: if (uns) valp = new(g) TYPVAL<uint>((uint)0, TYPE_INT, 0, true); else @@ -382,6 +459,74 @@ PVAL AllocateValue(PGLOBAL g, int type, int len, int prec, return valp; } // end of AllocateValue +/***********************************************************************/ +/* Allocate a constant Value converted to newtype. */ +/* Can also be used to copy a Value eventually converted. */ +/***********************************************************************/ +PVAL AllocateValue(PGLOBAL g, PVAL valp, int newtype, int uns) + { + PSZ p, sp; + bool un = (uns < 0) ? false : (uns > 0) ? 
true : valp->IsUnsigned(); + + if (newtype == TYPE_VOID) // Means allocate a value of the same type + newtype = valp->GetType(); + + switch (newtype) { + case TYPE_STRING: + p = (PSZ)PlugSubAlloc(g, NULL, 1 + valp->GetValLen()); + + if ((sp = valp->GetCharString(p)) != p) + strcpy (p, sp); + + valp = new(g) TYPVAL<PSZ>(g, p, valp->GetValLen(), valp->GetValPrec()); + break; + case TYPE_SHORT: + if (un) + valp = new(g) TYPVAL<ushort>(valp->GetUShortValue(), + TYPE_SHORT, 0, true); + else + valp = new(g) TYPVAL<short>(valp->GetShortValue(), TYPE_SHORT); + + break; + case TYPE_INT: + if (un) + valp = new(g) TYPVAL<uint>(valp->GetUIntValue(), TYPE_INT, 0, true); + else + valp = new(g) TYPVAL<int>(valp->GetIntValue(), TYPE_INT); + + break; + case TYPE_BIGINT: + if (un) + valp = new(g) TYPVAL<ulonglong>(valp->GetUBigintValue(), + TYPE_BIGINT, 0, true); + else + valp = new(g) TYPVAL<longlong>(valp->GetBigintValue(), TYPE_BIGINT); + + break; + case TYPE_DATE: + valp = new(g) DTVAL(g, valp->GetIntValue()); + break; + case TYPE_DOUBLE: + valp = new(g) TYPVAL<double>(valp->GetFloatValue(), TYPE_DOUBLE, + valp->GetValPrec()); + break; + case TYPE_TINY: + if (un) + valp = new(g) TYPVAL<uchar>(valp->GetUTinyValue(), + TYPE_TINY, 0, true); + else + valp = new(g) TYPVAL<char>(valp->GetTinyValue(), TYPE_TINY); + + break; + default: + sprintf(g->Message, MSG(BAD_VALUE_TYPE), newtype); + return NULL; + } // endswitch type + + valp->SetGlobal(g); + return valp; + } // end of AllocateValue + /* -------------------------- Class VALUE ---------------------------- */ /***********************************************************************/ @@ -418,6 +563,18 @@ const char *VALUE::GetXfmt(void) return fmt; } // end of GetFmt +/***********************************************************************/ +/* Returns a BYTE indicating the comparison between two values. */ +/* Bit 1 indicates equality, Bit 2 less than, and Bit3 greater than. 
*/ +/* More than 1 bit can be set only in the case of TYPE_LIST. */ +/***********************************************************************/ +BYTE VALUE::TestValue(PVAL vp) + { + int n = CompareValue(vp); + + return (n > 0) ? 0x04 : (n < 0) ? 0x02 : 0x01; + } // end of TestValue + /* -------------------------- Class TYPVAL ---------------------------- */ /***********************************************************************/ @@ -543,8 +700,8 @@ bool TYPVAL<TYPE>::SetValue_char(char *p, int n) { bool rc, minus; ulonglong maxval = MaxVal(); - ulonglong val = CharToNumber(p, n, maxval, Unsigned, &minus, &rc); - + ulonglong val = CharToNumber(p, n, maxval, Unsigned, &minus, &rc); + if (minus && val < maxval) Tval = (TYPE)(-(signed)val); else @@ -566,7 +723,7 @@ bool TYPVAL<double>::SetValue_char(char *p, int n) if (p) { char buf[64]; - for (; n > 0 && *p == ' '; p++) + for (; n > 0 && *p == ' '; p++) n--; memcpy(buf, p, MY_MIN(n, 31)); @@ -789,6 +946,24 @@ bool TYPVAL<TYPE>::IsEqual(PVAL vp, bool chktype) } // end of IsEqual /***********************************************************************/ +/* Compare values and returns 1, 0 or -1 according to comparison. */ +/* This function is used for evaluation of numeric filters. */ +/***********************************************************************/ +template <class TYPE> +int TYPVAL<TYPE>::CompareValue(PVAL vp) + { +//assert(vp->GetType() == Type); + + // Process filtering on numeric values. + TYPE n = GetTypedValue(vp); + +//if (trace) +// htrc(" Comparing: val=%d,%d\n", Tval, n); + + return (Tval > n) ? 1 : (Tval < n) ? (-1) : 0; + } // end of CompareValue + +/***********************************************************************/ /* FormatValue: This function set vp (a STRING value) to the string */ /* constructed from its own value formated using the fmt format. */ /* This function assumes that the format matches the value type. 
*/ @@ -870,11 +1045,11 @@ TYPVAL<PSZ>::TYPVAL(PGLOBAL g, PSZ s, int n, int c) if (!s) { if (g) { - Strp = (char *)PlugSubAlloc(g, NULL, Len + 1); - Strp[Len] = '\0'; - } else - assert(false); - + Strp = (char *)PlugSubAlloc(g, NULL, Len + 1); + Strp[Len] = '\0'; + } else + assert(false); + } else Strp = s; @@ -888,8 +1063,8 @@ TYPVAL<PSZ>::TYPVAL(PGLOBAL g, PSZ s, int n, int c) char TYPVAL<PSZ>::GetTinyValue(void) { bool m; - ulonglong val = CharToNumber(Strp, strlen(Strp), INT_MAX8, false, &m); - + ulonglong val = CharToNumber(Strp, strlen(Strp), INT_MAX8, false, &m); + return (m && val < INT_MAX8) ? (char)(-(signed)val) : (char)val; } // end of GetTinyValue @@ -898,7 +1073,7 @@ char TYPVAL<PSZ>::GetTinyValue(void) /***********************************************************************/ uchar TYPVAL<PSZ>::GetUTinyValue(void) { - return (uchar)CharToNumber(Strp, strlen(Strp), UINT_MAX8, true); + return (uchar)CharToNumber(Strp, strlen(Strp), UINT_MAX8, true); } // end of GetUTinyValue /***********************************************************************/ @@ -907,8 +1082,8 @@ uchar TYPVAL<PSZ>::GetUTinyValue(void) short TYPVAL<PSZ>::GetShortValue(void) { bool m; - ulonglong val = CharToNumber(Strp, strlen(Strp), INT_MAX16, false, &m); - + ulonglong val = CharToNumber(Strp, strlen(Strp), INT_MAX16, false, &m); + return (m && val < INT_MAX16) ? 
(short)(-(signed)val) : (short)val; } // end of GetShortValue @@ -917,7 +1092,7 @@ short TYPVAL<PSZ>::GetShortValue(void) /***********************************************************************/ ushort TYPVAL<PSZ>::GetUShortValue(void) { - return (ushort)CharToNumber(Strp, strlen(Strp), UINT_MAX16, true); + return (ushort)CharToNumber(Strp, strlen(Strp), UINT_MAX16, true); } // end of GetUshortValue /***********************************************************************/ @@ -926,8 +1101,8 @@ ushort TYPVAL<PSZ>::GetUShortValue(void) int TYPVAL<PSZ>::GetIntValue(void) { bool m; - ulonglong val = CharToNumber(Strp, strlen(Strp), INT_MAX32, false, &m); - + ulonglong val = CharToNumber(Strp, strlen(Strp), INT_MAX32, false, &m); + return (m && val < INT_MAX32) ? (int)(-(signed)val) : (int)val; } // end of GetIntValue @@ -936,7 +1111,7 @@ int TYPVAL<PSZ>::GetIntValue(void) /***********************************************************************/ uint TYPVAL<PSZ>::GetUIntValue(void) { - return (uint)CharToNumber(Strp, strlen(Strp), UINT_MAX32, true); + return (uint)CharToNumber(Strp, strlen(Strp), UINT_MAX32, true); } // end of GetUintValue /***********************************************************************/ @@ -945,8 +1120,8 @@ uint TYPVAL<PSZ>::GetUIntValue(void) longlong TYPVAL<PSZ>::GetBigintValue(void) { bool m; - ulonglong val = CharToNumber(Strp, strlen(Strp), INT_MAX64, false, &m); - + ulonglong val = CharToNumber(Strp, strlen(Strp), INT_MAX64, false, &m); + return (m && val < INT_MAX64) ? 
(-(signed)val) : (longlong)val; } // end of GetBigintValue @@ -955,7 +1130,7 @@ longlong TYPVAL<PSZ>::GetBigintValue(void) /***********************************************************************/ ulonglong TYPVAL<PSZ>::GetUBigintValue(void) { - return CharToNumber(Strp, strlen(Strp), ULONGLONG_MAX, true); + return CharToNumber(Strp, strlen(Strp), ULONGLONG_MAX, true); } // end of GetUBigintValue /***********************************************************************/ @@ -989,18 +1164,18 @@ bool TYPVAL<PSZ>::SetValue_char(char *p, int n) if ((n = MY_MIN(n, Len))) { strncpy(Strp, p, n); -// for (p = Strp + n - 1; p >= Strp && (*p == ' ' || *p == '\0'); p--) ; +// for (p = Strp + n - 1; p >= Strp && (*p == ' ' || *p == '\0'); p--) ; for (p = Strp + n - 1; p >= Strp; p--) if (*p && *p != ' ') break; - *(++p) = '\0'; + *(++p) = '\0'; - if (trace > 1) - htrc(" Setting string to: '%s'\n", Strp); - - } else - Reset(); + if (trace > 1) + htrc(" Setting string to: '%s'\n", Strp); + + } else + Reset(); Null = false; } else { @@ -1239,6 +1414,32 @@ bool TYPVAL<PSZ>::IsEqual(PVAL vp, bool chktype) } // end of IsEqual /***********************************************************************/ +/* Compare values and returns 1, 0 or -1 according to comparison. */ +/* This function is used for evaluation of numeric filters. */ +/***********************************************************************/ +int TYPVAL<PSZ>::CompareValue(PVAL vp) + { + int n; +//assert(vp->GetType() == Type); + + if (trace) + htrc(" Comparing: val='%s','%s'\n", Strp, vp->GetCharValue()); + + // Process filtering on character strings. + if (Ci || vp->IsCi()) + n = stricmp(Strp, vp->GetCharValue()); + else + n = strcmp(Strp, vp->GetCharValue()); + +#if defined(WIN32) + if (n == _NLSCMPERROR) + return n; // Here we should raise an error +#endif // WIN32 + + return (n > 0) ? 1 : (n < 0) ? 
-1 : 0; + } // end of CompareValue + +/***********************************************************************/ /* FormatValue: This function set vp (a STRING value) to the string */ /* constructed from its own value formated using the fmt format. */ /* This function assumes that the format matches the value type. */ @@ -1304,7 +1505,7 @@ bool DECVAL::IsZero(void) /***********************************************************************/ /* DECIMAL: Reset value to zero. */ /***********************************************************************/ -void DECVAL::Reset(void) +void DECVAL::Reset(void) { int i = 0; @@ -1383,18 +1584,18 @@ bool DECVAL::SetValue_char(char *p, int n) if ((n = MY_MIN(n, Len))) { strncpy(Strp, p, n); -// for (p = Strp + n - 1; p >= Strp && (*p == ' ' || *p == '\0'); p--) ; +// for (p = Strp + n - 1; p >= Strp && (*p == ' ' || *p == '\0'); p--) ; for (p = Strp + n - 1; p >= Strp; p--) if (*p && *p != ' ') break; - *(++p) = '\0'; + *(++p) = '\0'; - if (trace > 1) - htrc(" Setting string to: '%s'\n", Strp); - - } else - Reset(); + if (trace > 1) + htrc(" Setting string to: '%s'\n", Strp); + + } else + Reset(); Null = false; } else { @@ -1464,6 +1665,23 @@ bool DECVAL::IsEqual(PVAL vp, bool chktype) return !strcmp(Strp, vp->GetCharString(buf)); } // end of IsEqual +/***********************************************************************/ +/* Compare values and returns 1, 0 or -1 according to comparison. */ +/* This function is used for evaluation of numeric filters. */ +/***********************************************************************/ +int DECVAL::CompareValue(PVAL vp) + { +//assert(vp->GetType() == Type); + + // Process filtering on numeric values. + double f = atof(Strp), n = vp->GetFloatValue(); + +//if (trace) +// htrc(" Comparing: val=%d,%d\n", f, n); + + return (f > n) ? 1 : (f < n) ? 
(-1) : 0; + } // end of CompareValue + #if 0 /***********************************************************************/ /* FormatValue: This function set vp (a STRING value) to the string */ @@ -2062,7 +2280,7 @@ bool DTVAL::MakeTime(struct tm *ptm) time_t t = mktime_mysql(ptm); if (trace > 1) - htrc("MakeTime from (%d,%d,%d,%d,%d,%d)\n", + htrc("MakeTime from (%d,%d,%d,%d,%d,%d)\n", ptm->tm_year, ptm->tm_mon, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec); @@ -2085,7 +2303,7 @@ bool DTVAL::MakeTime(struct tm *ptm) Tval= (int) t; if (trace > 1) - htrc("MakeTime Ival=%d\n", Tval); + htrc("MakeTime Ival=%d\n", Tval); return false; } // end of MakeTime @@ -2169,7 +2387,7 @@ bool DTVAL::MakeDate(PGLOBAL g, int *val, int nval) } // endfor i if (trace > 1) - htrc("MakeDate datm=(%d,%d,%d,%d,%d,%d)\n", + htrc("MakeDate datm=(%d,%d,%d,%d,%d,%d)\n", datm.tm_year, datm.tm_mon, datm.tm_mday, datm.tm_hour, datm.tm_min, datm.tm_sec); diff --git a/storage/connect/value.h b/storage/connect/value.h index 56992d5bc26..3dc7940b964 100644 --- a/storage/connect/value.h +++ b/storage/connect/value.h @@ -46,9 +46,11 @@ DllExport char *GetFormatType(int); DllExport int GetFormatType(char); DllExport bool IsTypeChar(int type); DllExport bool IsTypeNum(int type); +DllExport int ConvertType(int, int, CONV, bool match = false); +DllExport PVAL AllocateValue(PGLOBAL, PVAL, int = TYPE_VOID, int = 0); DllExport PVAL AllocateValue(PGLOBAL, int, int len = 0, int prec = 0, bool uns = false, PSZ fmt = NULL); -DllExport ulonglong CharToNumber(char *, int, ulonglong, bool, +DllExport ulonglong CharToNumber(char *, int, ulonglong, bool, bool *minus = NULL, bool *rc = NULL); /***********************************************************************/ @@ -93,6 +95,9 @@ class DllExport VALUE : public BLOCK { virtual bool SetValue_pval(PVAL valp, bool chktype = false) = 0; virtual bool SetValue_char(char *p, int n) = 0; virtual void SetValue_psz(PSZ s) = 0; + virtual void SetValue_bool(bool b) 
{assert(FALSE);} + virtual int CompareValue(PVAL vp) = 0; + virtual BYTE TestValue(PVAL vp); virtual void SetValue(char c) {assert(false);} virtual void SetValue(uchar c) {assert(false);} virtual void SetValue(short i) {assert(false);} @@ -161,6 +166,8 @@ class DllExport TYPVAL : public VALUE { virtual bool SetValue_pval(PVAL valp, bool chktype); virtual bool SetValue_char(char *p, int n); virtual void SetValue_psz(PSZ s); + virtual void SetValue_bool(bool b) {Tval = (b) ? 1 : 0;} + virtual int CompareValue(PVAL vp); virtual void SetValue(char c) {Tval = (TYPE)c; Null = false;} virtual void SetValue(uchar c) {Tval = (TYPE)c; Null = false;} virtual void SetValue(short i) {Tval = (TYPE)i; Null = false;} @@ -199,7 +206,7 @@ class DllExport TYPVAL : public VALUE { /* Specific STRING class. */ /***********************************************************************/ template <> -class DllExport TYPVAL<PSZ>: public VALUE { +class DllExport TYPVAL<PSZ>: public VALUE { public: // Constructors TYPVAL(PSZ s); @@ -240,6 +247,7 @@ class DllExport TYPVAL<PSZ>: public VALUE { virtual void SetValue(ulonglong n); virtual void SetValue(double f); virtual void SetBinValue(void *p); + virtual int CompareValue(PVAL vp); virtual bool GetBinValue(void *buf, int buflen, bool go); virtual char *ShowValue(char *buf, int); virtual char *GetCharString(char *p); @@ -256,7 +264,7 @@ class DllExport TYPVAL<PSZ>: public VALUE { /***********************************************************************/ /* Specific DECIMAL class. 
*/ /***********************************************************************/ -class DllExport DECVAL: public TYPVAL<PSZ> { +class DllExport DECVAL: public TYPVAL<PSZ> { public: // Constructors DECVAL(PSZ s); @@ -272,6 +280,7 @@ class DllExport DECVAL: public TYPVAL<PSZ> { virtual bool GetBinValue(void *buf, int buflen, bool go); virtual char *ShowValue(char *buf, int); virtual bool IsEqual(PVAL vp, bool chktype); + virtual int CompareValue(PVAL vp); // Members }; // end of class DECVAL @@ -279,7 +288,7 @@ class DllExport DECVAL: public TYPVAL<PSZ> { /***********************************************************************/ /* Specific BINARY class. */ /***********************************************************************/ -class DllExport BINVAL: public VALUE { +class DllExport BINVAL: public VALUE { public: // Constructors //BINVAL(void *p); @@ -320,6 +329,7 @@ class DllExport BINVAL: public VALUE { virtual void SetValue(double f); virtual void SetBinValue(void *p); virtual bool GetBinValue(void *buf, int buflen, bool go); + virtual int CompareValue(PVAL vp) {assert(false); return 0;} virtual char *ShowValue(char *buf, int); virtual char *GetCharString(char *p); virtual bool IsEqual(PVAL vp, bool chktype); diff --git a/storage/connect/xindex.cpp b/storage/connect/xindex.cpp index fad7495fa82..12781b2ac05 100755 --- a/storage/connect/xindex.cpp +++ b/storage/connect/xindex.cpp @@ -1,7 +1,7 @@ /***************** Xindex C++ Class Xindex Code (.CPP) *****************/ -/* Name: XINDEX.CPP Version 2.8 */ +/* Name: XINDEX.CPP Version 2.9 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2004-2013 */ +/* (C) Copyright to the author Olivier BERTRAND 2004-2014 */ /* */ /* This file contains the class XINDEX implementation code. 
*/ /***********************************************************************/ @@ -45,6 +45,7 @@ //nclude "array.h" #include "filamtxt.h" #include "tabdos.h" +#include "tabvct.h" /***********************************************************************/ /* Macro or external routine definition */ @@ -112,6 +113,8 @@ INDEXDEF::INDEXDEF(char *name, bool uniq, int n) Unique = uniq; Invalid = false; AutoInc = false; + Dynamic = false; + Mapped = false; Nparts = 0; ID = n; //Offset = 0; @@ -165,6 +168,8 @@ XXBASE::XXBASE(PTDBDOS tbxp, bool b) : CSORT(b), Op = OP_EQ; To_KeyCol = NULL; Mul = false; + Srtd = false; + Dynamic = false; Val_K = -1; Nblk = Sblk = 0; Thresh = 7; @@ -237,25 +242,30 @@ void XINDEX::Reset(void) /***********************************************************************/ /* XINDEX Close: terminate index and free all allocated data. */ -/* Do not reset other values that are used at return to make. */ +/* Do not reset values that are used at return to make. */ /***********************************************************************/ void XINDEX::Close(void) { // Close file or view of file - X->Close(); + if (X) + X->Close(); // De-allocate data PlgDBfree(Record); PlgDBfree(Index); PlgDBfree(Offset); - // De-allocate Key data - for (PXCOL kcp = To_KeyCol; kcp; kcp = kcp->Next) + for (PXCOL kcp = To_KeyCol; kcp; kcp = kcp->Next) { + // Column values cannot be retrieved from key anymore + if (kcp->Colp) + kcp->Colp->SetKcol(NULL); + + // De-allocate Key data kcp->FreeData(); + } // endfor kcp - // Column values cannot be retrieved from key anymore - for (int k = 0; k < Nk; k++) - To_Cols[k]->SetKcol(NULL); + if (Tdbp) + Tdbp->RestoreNrec(); } // end of Close @@ -276,6 +286,25 @@ int XINDEX::Qcompare(int *i1, int *i2) } // end of Qcompare /***********************************************************************/ +/* AddColumns: here we try to determine whether it is worthwhile to */ +/* add to the keys the values of the columns selected for this table. 
*/ +/* Sure enough, it is done while records are read and permit to avoid */ +/* reading the table while doing the join (Dynamic index only) */ +/***********************************************************************/ +bool XINDEX::AddColumns(PIXDEF xdp) + { + if (!Dynamic) + return false; // Not applying to static index + else if (IsMul()) + return false; // Not done yet for multiple index + else if (Tbxp->GetAmType() == TYPE_AM_VCT && ((PTDBVCT)Tbxp)->IsSplit()) + return false; // This would require to read additional files + else + return true; + + } // end of AddColumns + +/***********************************************************************/ /* Make: Make and index on key column(s). */ /***********************************************************************/ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) @@ -283,13 +312,18 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) /*********************************************************************/ /* Table can be accessed through an index. */ /*********************************************************************/ - int k, rc = RC_OK; + int k, nk = Nk, rc = RC_OK; int *bof, i, j, n, ndf, nkey; PKPDEF kdfp = Xdp->GetToKeyParts(); - bool brc = true; + bool brc = false; PCOL colp; - PXCOL kp, prev = NULL, kcp = NULL; - PDBUSER dup = (PDBUSER)g->Activityp->Aptr; + PFIL filp = Tdbp->GetFilter(); + PXCOL kp, addcolp, prev = NULL, kcp = NULL; +//PDBUSER dup = (PDBUSER)g->Activityp->Aptr; + +#if defined(_DEBUG) + assert(X || Nk == 1); +#endif // _DEBUG /*********************************************************************/ /* Allocate the storage that will contain the keys and the file */ @@ -347,6 +381,51 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) To_LastCol = prev; + if (AddColumns(sxp)) { + PCOL kolp = To_Cols[0]; // Temporary while imposing Nk = 1 + + i = 0; + + // Allocate the accompanying + for (colp = Tbxp->GetColumns(); colp; colp = colp->GetNext()) { + // Count how many columns to add +// for (k = 0; k < Nk; k++) +// if 
(colp == To_Cols[k]) +// break; + +// if (k == nk) + if (colp != kolp) + i++; + + } // endfor colp + + if (i && i < 10) // Should be a parameter + for (colp = Tbxp->GetColumns(); colp; colp = colp->GetNext()) { +// for (k = 0; k < Nk; k++) +// if (colp == To_Cols[k]) +// break; + +// if (k < nk) + if (colp == kolp) + continue; // This is a key column + + kcp = new(g) KXYCOL(this); + + if (kcp->Init(g, colp, n, true, NULL)) + return true; + + if (trace) + htrc("Adding colp=%p Buf_Type=%d size=%d\n", + colp, colp->GetResultType(), n); + + nk++; + prev->Next = kcp; + prev = kcp; + } // endfor colp + + } // endif AddColumns + +#if 0 /*********************************************************************/ /* Get the starting information for progress. */ /*********************************************************************/ @@ -354,18 +433,19 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) sprintf((char*)dup->Step, MSG(BUILD_INDEX), Xdp->GetName(), Tdbp->Name); dup->ProgMax = Tdbp->GetProgMax(g); dup->ProgCur = 0; +#endif // 0 /*********************************************************************/ /* Standard init: read the file and construct the index table. */ /* Note: reading will be sequential as To_Kindex is not set. */ /*********************************************************************/ - for (i = nkey = 0; i < n && rc != RC_EF; i++) { -#if defined(THREAD) + for (i = nkey = 0; rc != RC_EF; i++) { +#if 0 if (!dup->Step) { strcpy(g->Message, MSG(QUERY_CANCELLED)); longjmp(g->jumper[g->jump_level], 99); } // endif Step -#endif // THREAD +#endif // 0 /*******************************************************************/ /* Read a valid record from table file. 
*/ @@ -373,16 +453,19 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) rc = Tdbp->ReadDB(g); // Update progress information - dup->ProgCur = Tdbp->GetProgCur(); +// dup->ProgCur = Tdbp->GetProgCur(); // Check return code and do whatever must be done according to it switch (rc) { case RC_OK: - break; - case RC_EF: - goto end_of_file; + if (ApplyFilter(g, filp)) + break; + + // passthru case RC_NF: continue; + case RC_EF: + goto end_of_file; default: sprintf(g->Message, MSG(RC_READING), rc, Tdbp->Name); goto err; @@ -392,20 +475,25 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) /* Get and Store the file position of the last read record for */ /* future direct access. */ /*******************************************************************/ - To_Rec[nkey] = Tdbp->GetRecpos(); + if (nkey == n) { + sprintf(g->Message, MSG(TOO_MANY_KEYS), nkey); + return true; + } else + To_Rec[nkey] = Tdbp->GetRecpos(); /*******************************************************************/ /* Get the keys and place them in the key blocks. */ /*******************************************************************/ for (k = 0, kcp = To_KeyCol; - k < Nk && kcp; + k < nk && kcp; k++, kcp = kcp->Next) { - colp = To_Cols[k]; - colp->Reset(); +// colp = To_Cols[k]; + colp = kcp->Colp; - colp->ReadColumn(g); -// if (colp->ReadColumn(g)) -// goto err; + if (!colp->GetStatus(BUF_READ)) + colp->ReadColumn(g); + else + colp->Reset(); kcp->SetValue(colp, nkey); } // endfor k @@ -416,7 +504,7 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) end_of_file: // Update progress information - dup->ProgCur = Tdbp->GetProgMax(g); +//dup->ProgCur = Tdbp->GetProgMax(g); /*********************************************************************/ /* Record the Index size and eventually resize memory allocation. 
*/ @@ -451,18 +539,30 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) goto err; // Error } // endif alloc + // We must separate keys and added columns before sorting + addcolp = To_LastCol->Next; + To_LastCol->Next = NULL; + // Call the sort program, it returns the number of distinct values if ((Ndif = Qsort(g, Num_K)) < 0) goto err; // Error during sort +// if (trace) + htrc("Make: Nk=%d n=%d Num_K=%d Ndif=%d addcolp=%p BlkFil=%p X=%p\n", + Nk, n, Num_K, Ndif, addcolp, Tdbp->To_BlkFil, X); + // Check whether the unique index is unique indeed if (!Mul) if (Ndif < Num_K) { strcpy(g->Message, MSG(INDEX_NOT_UNIQ)); + brc = true; goto err; } else PlgDBfree(Offset); // Not used anymore + // Restore kcp list + To_LastCol->Next = addcolp; + // Use the index to physically reorder the xindex Srtd = Reorder(g); @@ -487,7 +587,7 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) } else { Mul = false; // Current index is unique PlgDBfree(Offset); // Not used anymore - MaxSame = 1; // Reset it when remaking an index + MaxSame = 1; // Reset it when remaking an index } // endif Ndif /*********************************************************************/ @@ -502,7 +602,7 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) /* except if the subset originally contains unique values. */ /*********************************************************************/ // Update progress information - dup->Step = STEP(REDUCE_INDEX); +//dup->Step = STEP(REDUCE_INDEX); ndf = Ndif; To_LastCol->Mxs = MaxSame; @@ -550,9 +650,11 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) /*********************************************************************/ /* For sorted columns and fixed record size, file position can be */ /* calculated, so the Record array can be discarted. */ + /* Not true for DBF tables because of eventual soft deleted lines. */ /* Note: for Num_K = 1 any non null value is Ok. 
*/ /*********************************************************************/ - if (Srtd && Tdbp->Ftype != RECFM_VAR) { + if (Srtd && !filp && Tdbp->Ftype != RECFM_VAR + && Tdbp->Txfp->GetAmType() != TYPE_AM_DBF) { Incr = (Num_K > 1) ? To_Rec[1] : Num_K; PlgDBfree(Record); } // endif Srtd @@ -579,14 +681,24 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) Cur_K = Num_K; /*********************************************************************/ - /* Save the index so it has not to be recalculated. */ + /* Save the xindex so it has not to be recalculated. */ /*********************************************************************/ - if (!SaveIndex(g, sxp)) - brc = false; + if (X) { + if (SaveIndex(g, sxp)) + brc = true; + + } else { // Dynamic index + // Indicate that key column values can be found from KEYCOL's + for (kcp = To_KeyCol; kcp; kcp = kcp->Next) + kcp->Colp->SetKcol(kcp); + + Tdbp->SetFilter(NULL); // Not used anymore + } // endif X err: // We don't need the index anymore - Close(); + if (X || brc) + Close(); if (brc) printf("%s\n", g->Message); @@ -630,6 +742,7 @@ bool XINDEX::Reorder(PGLOBAL g) register int i, j, k, n; bool sorted = true; PXCOL kcp; +#if 0 PDBUSER dup = (PDBUSER)g->Activityp->Aptr; if (Num_K > 500000) { @@ -639,6 +752,7 @@ bool XINDEX::Reorder(PGLOBAL g) dup->ProgCur = 0; } else dup = NULL; +#endif // 0 if (!Pex) return Srtd; @@ -647,8 +761,8 @@ bool XINDEX::Reorder(PGLOBAL g) if (Pex[i] == Num_K) { // Already moved continue; } else if (Pex[i] == i) { // Already placed - if (dup) - dup->ProgCur++; +// if (dup) +// dup->ProgCur++; continue; } // endif's Pex @@ -677,8 +791,8 @@ bool XINDEX::Reorder(PGLOBAL g) To_Rec[j] = To_Rec[k]; } // endif k - if (dup) - dup->ProgCur++; +// if (dup) +// dup->ProgCur++; } // endfor j @@ -762,11 +876,11 @@ bool XINDEX::SaveIndex(PGLOBAL g, PIXDEF sxp) n[4] = Incr; // Increment of record positions n[5] = Nblk; n[6] = Sblk; -#if defined(TRACE) - printf("Saving index %s\n", Xdp->GetName()); - printf("ID=%d Nk=%d nof=%d 
Num_K=%d Incr=%d Nblk=%d Sblk=%d\n", - ID, Nk, nof, Num_K, Incr, Nblk, Sblk); -#endif // TRACE + if (trace) { + htrc("Saving index %s\n", Xdp->GetName()); + htrc("ID=%d Nk=%d nof=%d Num_K=%d Incr=%d Nblk=%d Sblk=%d\n", + ID, Nk, nof, Num_K, Incr, Nblk, Sblk); + } // endif trace size = X->Write(g, n, NZ, sizeof(int), rc); dup->ProgCur = 1; @@ -805,9 +919,8 @@ bool XINDEX::SaveIndex(PGLOBAL g, PIXDEF sxp) dup->ProgCur += 5; } // endfor kcp -#if defined(TRACE) - printf("Index %s saved, Size=%d\n", Xdp->GetName(), Size); -#endif // TRACE + if (trace) + htrc("Index %s saved, Size=%d\n", Xdp->GetName(), size); end: X->Close(fn, id); @@ -896,9 +1009,8 @@ bool XINDEX::Init(PGLOBAL g) PlugSetPath(fn, fn, Tdbp->GetPath()); -#if defined(TRACE) - printf("Index %s file: %s\n", Xdp->GetName(), fn); -#endif // TRACE + if (trace) + htrc("Index %s file: %s\n", Xdp->GetName(), fn); /*********************************************************************/ /* Open the index file and check its validity. */ @@ -910,18 +1022,18 @@ bool XINDEX::Init(PGLOBAL g) if (X->Read(g, nv, NZ, sizeof(int))) goto err; -#if defined(TRACE) - printf("nv=%d %d %d %d %d %d %d\n", - nv[0], nv[1], nv[2], nv[3], nv[4], nv[5], nv[6]); -#endif // TRACE + if (trace) + htrc("nv=%d %d %d %d %d %d %d\n", + nv[0], nv[1], nv[2], nv[3], nv[4], nv[5], nv[6]); // The test on ID was suppressed because MariaDB can change an index ID - // when other indexes are added or deleted + // when other indexes are added or deleted if (/*nv[0] != ID ||*/ nv[1] != Nk) { sprintf(g->Message, MSG(BAD_INDEX_FILE), fn); -#if defined(TRACE) - printf("nv[0]=%d ID=%d nv[1]=%d Nk=%d\n", nv[0], ID, nv[1], Nk); -#endif // TRACE + + if (trace) + htrc("nv[0]=%d ID=%d nv[1]=%d Nk=%d\n", nv[0], ID, nv[1], Nk); + goto err; } // endif @@ -1140,9 +1252,8 @@ bool XINDEX::MapInit(PGLOBAL g) PlugSetPath(fn, fn, Tdbp->GetPath()); -#if defined(TRACE) - printf("Index %s file: %s\n", Xdp->GetName(), fn); -#endif // TRACE + if (trace) + htrc("Index %s file: 
%s\n", Xdp->GetName(), fn); /*********************************************************************/ /* Get a view on the part of the index file containing this index. */ @@ -1157,24 +1268,24 @@ bool XINDEX::MapInit(PGLOBAL g) // Position the memory base at the offset of this index mbase += noff[id].Low; } // endif id - + // Now start the mapping process. nv = (int*)mbase; mbase += NZ * sizeof(int); -#if defined(TRACE) - printf("nv=%d %d %d %d %d %d %d\n", - nv[0], nv[1], nv[2], nv[3], nv[4], nv[5], nv[6]); -#endif // TRACE + if (trace) + htrc("nv=%d %d %d %d %d %d %d\n", + nv[0], nv[1], nv[2], nv[3], nv[4], nv[5], nv[6]); // The test on ID was suppressed because MariaDB can change an index ID - // when other indexes are added or deleted + // when other indexes are added or deleted if (/*nv[0] != ID ||*/ nv[1] != Nk) { // Not this index sprintf(g->Message, MSG(BAD_INDEX_FILE), fn); -#if defined(TRACE) - printf("nv[0]=%d ID=%d nv[1]=%d Nk=%d\n", nv[0], ID, nv[1], Nk); -#endif // TRACE + + if (trace) + htrc("nv[0]=%d ID=%d nv[1]=%d Nk=%d\n", nv[0], ID, nv[1], Nk); + goto err; } // endif nv @@ -1272,16 +1383,19 @@ err: /***********************************************************************/ /* Get Ndif and Num_K from the index file. */ /***********************************************************************/ -bool XINDEX::GetAllSizes(PGLOBAL g, int &ndif, int &numk) +bool XINDEX::GetAllSizes(PGLOBAL g,/* int &ndif,*/ int &numk) { char *ftype; char fn[_MAX_PATH]; - int n, nv[NZ], id = -1; - bool estim = false; + int nv[NZ], id = -1; // n +//bool estim = false; + bool rc = true; PDOSDEF defp = (PDOSDEF)Tdbp->To_Def; - ndif = numk = 0; +// ndif = numk = 0; + numk = 0; +#if 0 /*********************************************************************/ /* Get the estimated table size. 
*/ /* Note: for fixed tables we must use cardinality to avoid the call */ @@ -1309,6 +1423,7 @@ bool XINDEX::GetAllSizes(PGLOBAL g, int &ndif, int &numk) strcpy(g->Message, MSG(NO_KEY_COL)); return true; // Error } // endif Nk +#endif // 0 switch (Tdbp->Ftype) { case RECFM_VAR: ftype = ".dnx"; break; @@ -1341,9 +1456,8 @@ bool XINDEX::GetAllSizes(PGLOBAL g, int &ndif, int &numk) PlugSetPath(fn, fn, Tdbp->GetPath()); -#if defined(TRACE) - printf("Index %s file: %s\n", Xdp->GetName(), fn); -#endif // TRACE + if (trace) + htrc("Index %s file: %s\n", Xdp->GetName(), fn); /*********************************************************************/ /* Open the index file and check its validity. */ @@ -1359,20 +1473,21 @@ bool XINDEX::GetAllSizes(PGLOBAL g, int &ndif, int &numk) if (X->Read(g, nv, NZ, sizeof(int))) goto err; -#if defined(TRACE) - printf("nv=%d %d %d %d\n", nv[0], nv[1], nv[2], nv[3]); -#endif // TRACE + if (trace) + htrc("nv=%d %d %d %d\n", nv[0], nv[1], nv[2], nv[3]); // The test on ID was suppressed because MariaDB can change an index ID - // when other indexes are added or deleted + // when other indexes are added or deleted if (/*nv[0] != ID ||*/ nv[1] != Nk) { sprintf(g->Message, MSG(BAD_INDEX_FILE), fn); -#if defined(TRACE) - printf("nv[0]=%d ID=%d nv[1]=%d Nk=%d\n", nv[0], ID, nv[1], Nk); -#endif // TRACE + + if (trace) + htrc("nv[0]=%d ID=%d nv[1]=%d Nk=%d\n", nv[0], ID, nv[1], Nk); + goto err; } // endif +#if 0 if (nv[2]) { Mul = true; Ndif = nv[2] - 1; // nv[2] is offset size, equal to Ndif + 1 @@ -1388,9 +1503,11 @@ bool XINDEX::GetAllSizes(PGLOBAL g, int &ndif, int &numk) sprintf(g->Message, MSG(OPT_NOT_MATCH), fn); goto err; } // endif +#endif // 0 Num_K = nv[3]; +#if 0 if (Nk > 1) { if (nv[2] && X->Seek(g, nv[2] * sizeof(int), 0, SEEK_CUR)) goto err; @@ -1411,17 +1528,18 @@ bool XINDEX::GetAllSizes(PGLOBAL g, int &ndif, int &numk) Ndif = nv[0]; } // endif Nk +#endif // 0 /*********************************************************************/ /* 
Set size values. */ /*********************************************************************/ - ndif = Ndif; +//ndif = Ndif; numk = Num_K; - return false; + rc = false; err: X->Close(); - return true; + return rc; } // end of GetAllSizes /***********************************************************************/ @@ -1455,7 +1573,7 @@ int XINDEX::Range(PGLOBAL g, int limit, bool incl) n = k; // if (limit) // n = (Mul) ? k : kp->Val_K; -// else +// else // n = (Mul) ? Pof[kp->Val_K + 1] - k : 1; } else { @@ -1642,9 +1760,8 @@ int XINDEX::Fetch(PGLOBAL g) break; case OP_SAME: // Read next same // Logically the key values should be the same as before -#if defined(TRACE) - printf("looking for next same value\n"); -#endif // TRACE + if (trace > 1) + htrc("looking for next same value\n"); if (NextVal(true)) { Op = OP_EQ; @@ -1690,9 +1807,9 @@ int XINDEX::Fetch(PGLOBAL g) Nth++; -#if defined(TRACE) - printf("Fetch: Looking for new value\n"); -#endif // TRACE + if (trace > 1) + htrc("Fetch: Looking for new value\n"); + Cur_K = FastFind(Nval); if (Cur_K >= Num_K) @@ -1848,8 +1965,7 @@ int XINDEX::FastFind(int nv) XINDXS::XINDXS(PTDBDOS tdbp, PIXDEF xdp, PXLOAD pxp, PCOL *cp, PXOB *xp) : XINDEX(tdbp, xdp, pxp, cp, xp) { -//Srtd = To_Cols[0]->GetOpt() < 0; // ????? - Srtd = false; + Srtd = To_Cols[0]->GetOpt() == 2; } // end of XINDXS constructor /***********************************************************************/ @@ -1891,7 +2007,7 @@ int XINDXS::Range(PGLOBAL g, int limit, bool incl) if (k < Num_K || Op != OP_EQ) if (limit) n = (Mul) ? k : kp->Val_K; - else + else n = (Mul) ? 
Pof[kp->Val_K + 1] - k : 1; } else { @@ -1987,10 +2103,9 @@ int XINDXS::Fetch(PGLOBAL g) To_KeyCol->Val_K = Cur_K = 0; Op = OP_NEXT; break; - case OP_SAME: // Read next same -#if defined(TRACE) -// printf("looking for next same value\n"); -#endif // TRACE + case OP_SAME: // Read next same + if (trace > 1) + htrc("looking for next same value\n"); if (!Mul || NextVal(true)) { Op = OP_EQ; @@ -2023,18 +2138,17 @@ int XINDXS::Fetch(PGLOBAL g) /* Look for the first key equal to the link column values */ /* and return its rank whithin the index table. */ /*****************************************************************/ - if (To_KeyCol->InitFind(g, To_Vals[0])) - return -1; // No more constant values - else - Nth++; - -#if defined(TRACE) - printf("Fetch: Looking for new value\n"); -#endif // TRACE - - Cur_K = FastFind(1); - - if (Cur_K >= Num_K) + if (To_KeyCol->InitFind(g, To_Vals[0])) + return -1; // No more constant values + else + Nth++; + + if (trace > 1) + htrc("Fetch: Looking for new value\n"); + + Cur_K = FastFind(1); + + if (Cur_K >= Num_K) // Rank not whithin index table, signal record not found return -2; else if (Mul) @@ -2119,7 +2233,10 @@ int XINDXS::FastFind(int nk) n = 0; } // endif sup - kcp->Val_K = i; // Used by FillValue + // Loop on kcp because of dynamic indexing + for (; kcp; kcp = kcp->Next) + kcp->Val_K = i; // Used by FillValue + return ((n) ? Num_K : (Mul) ? 
Pof[i] : i); } // end of FastFind @@ -2195,7 +2312,7 @@ bool XFILE::Open(PGLOBAL g, char *filename, int id, MODE mode) sprintf(g->Message, MSG(FUNC_ERRNO), errno, "Xseek"); return true; } // endif - + NewOff.Low = (int)ftell(Xfile); } else if (mode == MODE_WRITE) { if (id >= 0) { @@ -2218,7 +2335,7 @@ bool XFILE::Open(PGLOBAL g, char *filename, int id, MODE mode) sprintf(g->Message, MSG(FUNC_ERRNO), errno, "Xseek"); return true; } // endif - + } // endif mode return false; @@ -2424,14 +2541,14 @@ bool XHUGE::Open(PGLOBAL g, char *filename, int id, MODE mode) } // endif rc // Position the cursor at the offset of this index - rc = SetFilePointer(Hfile, noff[id].Low, + rc = SetFilePointer(Hfile, noff[id].Low, (PLONG)&noff[id].High, FILE_BEGIN); if (rc == INVALID_SET_FILE_POINTER) { sprintf(g->Message, MSG(FUNC_ERRNO), GetLastError(), "SetFilePointer"); return true; } // endif - + } // endif Mode #else // UNIX @@ -2479,7 +2596,7 @@ bool XHUGE::Open(PGLOBAL g, char *filename, int id, MODE mode) sprintf(g->Message, MSG(FUNC_ERRNO), errno, "Seek"); return true; } // endif - + } else if (mode == MODE_WRITE) { if (id >= 0) { // New not sep index file. Write the header. 
@@ -2499,7 +2616,7 @@ bool XHUGE::Open(PGLOBAL g, char *filename, int id, MODE mode) sprintf(g->Message, MSG(FUNC_ERRNO), errno, "Hseek"); return true; } // endif - + } // endif mode #endif // UNIX @@ -2526,15 +2643,15 @@ bool XHUGE::Seek(PGLOBAL g, int low, int high, int origin) if (lseek64(Hfile, pos, origin) < 0) { sprintf(g->Message, MSG(ERROR_IN_LSK), errno); -#if defined(TRACE) - printf("lseek64 error %d\n", errno); -#endif // TRACE + + if (trace) + htrc("lseek64 error %d\n", errno); + return true; } // endif lseek64 -#if defined(TRACE) - printf("Seek: low=%d high=%d\n", low, high); -#endif // TRACE + if (trace) + htrc("Seek: low=%d high=%d\n", low, high); #endif // UNIX return false; @@ -2637,7 +2754,7 @@ void XHUGE::Close(char *fn, int id) CloseFileHandle(Hfile); Hfile = CreateFile(fn, GENERIC_READ | GENERIC_WRITE, 0, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); - + if (Hfile != INVALID_HANDLE_VALUE) if (SetFilePointer(Hfile, id * sizeof(IOFF), NULL, FILE_BEGIN) != INVALID_SET_FILE_POINTER) { @@ -2650,7 +2767,7 @@ void XHUGE::Close(char *fn, int id) #else // !WIN32 if (id >= 0 && fn) { fcntl(Hfile, F_SETFD, O_WRONLY); - + if (lseek(Hfile, id * sizeof(IOFF), SEEK_SET)) write(Hfile, &NewOff, sizeof(IOFF)); @@ -2788,7 +2905,7 @@ int XXROW::FastFind(int nk) /***********************************************************************/ /* KXYCOL public constructor. 
*/ /***********************************************************************/ -KXYCOL::KXYCOL(PKXBASE kp) : To_Keys(Keys.Memp), +KXYCOL::KXYCOL(PKXBASE kp) : To_Keys(Keys.Memp), To_Bkeys(Bkeys.Memp), Kof((CPINT&)Koff.Memp) { Next = NULL; @@ -2821,7 +2938,7 @@ bool KXYCOL::Init(PGLOBAL g, PCOL colp, int n, bool sm, int kln) int len = colp->GetLength(), prec = colp->GetScale(); // Currently no indexing on NULL columns - if (colp->IsNullable()) { + if (colp->IsNullable() && kln) { sprintf(g->Message, "Cannot index nullable column %s", colp->GetName()); return true; } // endif nullable @@ -2860,8 +2977,7 @@ bool KXYCOL::Init(PGLOBAL g, PCOL colp, int n, bool sm, int kln) // Store this information to avoid sorting when already done if (Asc) -// IsSorted = colp->GetOpt() == 2; - IsSorted = false; + IsSorted = colp->GetOpt() == 2; //SetNulls(colp->IsNullable()); for when null columns will be indexable Colp = colp; @@ -2885,9 +3001,9 @@ BYTE* KXYCOL::MapInit(PGLOBAL g, PCOL colp, int *n, BYTE *m) Type = colp->GetResultType(); - if (trace) - htrc("MapInit(%p): colp=%p type=%d n=%d len=%d m=%p\n", - this, colp, Type, n[0], len, m); + if (trace) + htrc("MapInit(%p): colp=%p type=%d n=%d len=%d m=%p\n", + this, colp, Type, n[0], len, m); // Allocate the Value object used when moving items Valp = AllocateValue(g, Type, len, prec, colp->IsUnsigned()); diff --git a/storage/connect/xindex.h b/storage/connect/xindex.h index 62430ffa0ad..4988a12326a 100644 --- a/storage/connect/xindex.h +++ b/storage/connect/xindex.h @@ -87,7 +87,9 @@ class DllExport INDEXDEF : public BLOCK { /* Index description block */ void SetNext(PIXDEF pxdf) {Next = pxdf;} PSZ GetName(void) {return (PSZ)Name;} bool IsUnique(void) {return Unique;} + bool IsDynamic(void) {return Dynamic;} bool IsAuto(void) {return AutoInc;} + bool IsValid(void) {return !Invalid;} void SetAuto(bool b) {AutoInc = b;} void SetInvalid(bool b) {Invalid = b;} int GetNparts(void) {return Nparts;} @@ -115,6 +117,8 @@ class DllExport 
INDEXDEF : public BLOCK { /* Index description block */ bool Unique; /* true if defined as unique */ bool Invalid; /* true if marked as Invalid */ bool AutoInc; /* true if unique key in auto increment */ + bool Dynamic; /* KINDEX style */ + bool Mapped; /* Use file mapping */ int Nparts; /* Number of key parts */ int ID; /* Index ID number */ int MaxSame; /* Max number of same values */ @@ -174,6 +178,8 @@ class DllExport XXBASE : public CSORT, public BLOCK { virtual void Reset(void) = 0; virtual bool IsMul(void) {return false;} virtual bool IsRandom(void) {return true;} + virtual bool IsDynamic(void) {return Dynamic;} + virtual void SetDynamic(bool dyn) {Dynamic = dyn;} virtual bool HaveSame(void) {return false;} virtual int GetCurPos(void) {return Cur_K;} virtual void SetNval(int n) {assert(n == 1);} @@ -192,6 +198,7 @@ class DllExport XXBASE : public CSORT, public BLOCK { virtual void Print(PGLOBAL g, FILE *f, uint n); virtual void Print(PGLOBAL g, char *ps, uint z); virtual bool Init(PGLOBAL g) = 0; + virtual bool Make(PGLOBAL g, PIXDEF sxp) = 0; #if defined(XMAP) virtual bool MapInit(PGLOBAL g) = 0; #endif // XMAP @@ -223,6 +230,7 @@ class DllExport XXBASE : public CSORT, public BLOCK { OPVAL Op; // Search operator bool Mul; // true if multiple bool Srtd; // true for sorted column + bool Dynamic; // true when dynamically made int Val_K; // Index of current value int Nblk; // Number of blocks int Sblk; // Block size @@ -268,9 +276,10 @@ class DllExport XINDEX : public XXBASE { virtual bool Make(PGLOBAL g, PIXDEF sxp); virtual bool SaveIndex(PGLOBAL g, PIXDEF sxp); virtual bool Reorder(PGLOBAL g); - bool GetAllSizes(PGLOBAL g, int &ndif, int &numk); + bool GetAllSizes(PGLOBAL g,/* int &ndif,*/ int &numk); protected: + bool AddColumns(PIXDEF xdp); bool NextValDif(void); // Members @@ -417,6 +426,7 @@ class DllExport XXROW : public XXBASE { virtual int MaxRange(void) {return 1;} virtual int Range(PGLOBAL g, int limit = 0, bool incl = true); virtual int 
Qcompare(int *, int *) {assert(false); return 0;} + virtual bool Make(PGLOBAL g, PIXDEF sxp) {return false;} virtual void Close(void) {} protected: diff --git a/storage/connect/xobject.cpp b/storage/connect/xobject.cpp index 05b408da2d2..cdc2ef9bf62 100644 --- a/storage/connect/xobject.cpp +++ b/storage/connect/xobject.cpp @@ -1,175 +1,186 @@ -/************ Xobject C++ Functions Source Code File (.CPP) ************/ -/* Name: XOBJECT.CPP Version 2.2 */ -/* */ -/* (C) Copyright to the author Olivier BERTRAND 1998-2012 */ -/* */ -/* This file contains base XOBJECT class functions. */ -/* Also here is the implementation of the CONSTANT class. */ -/***********************************************************************/ - -/***********************************************************************/ -/* Include mariaDB header file. */ -/***********************************************************************/ -#include "my_global.h" - -/***********************************************************************/ -/* Include required application header files */ -/* global.h is header containing all global Plug declarations. */ -/* plgdbsem.h is header containing the DB applic. declarations. */ -/***********************************************************************/ -#include "global.h" -#include "plgdbsem.h" -#include "xobject.h" - -/***********************************************************************/ -/* Macro definitions. */ -/***********************************************************************/ -#if defined(_DEBUG) || defined(DEBTRACE) -#define ASSERT(B) assert(B); -#else -#define ASSERT(B) -#endif - -/***********************************************************************/ -/* The one and only needed void object. 
*/ -/***********************************************************************/ -XVOID Xvoid; -PXOB const pXVOID = &Xvoid; // Pointer used by other classes - -/* ------------------------- Class XOBJECT --------------------------- */ - -/***********************************************************************/ -/* GetCharValue: returns the Result value as a char string. */ -/* Using GetCharValue provides no conversion from numeric types. */ -/***********************************************************************/ -PSZ XOBJECT::GetCharValue(void) - { - ASSERT(Value) - return Value->GetCharValue(); - } // end of GetCharValue() - -/***********************************************************************/ -/* GetShortValue: returns the Result value as a short integer. */ -/***********************************************************************/ -short XOBJECT::GetShortValue(void) - { - ASSERT(Value) - return Value->GetShortValue(); - } // end of GetShortValue - -/***********************************************************************/ -/* GetIntValue: returns the Result value as a int integer. */ -/***********************************************************************/ -int XOBJECT::GetIntValue(void) - { - ASSERT(Value) - return Value->GetIntValue(); - } // end of GetIntValue - -/***********************************************************************/ -/* GetFloatValue: returns the Result value as a double float. */ -/***********************************************************************/ -double XOBJECT::GetFloatValue(void) - { - ASSERT(Value) - return Value->GetFloatValue(); - } // end of GetFloatValue - -/* ------------------------- Class CONSTANT -------------------------- */ - -/***********************************************************************/ -/* CONSTANT public constructor. 
*/ -/***********************************************************************/ -CONSTANT::CONSTANT(PGLOBAL g, void *value, short type) - { - if (!(Value = AllocateValue(g, value, (int)type))) - longjmp(g->jumper[g->jump_level], TYPE_CONST); - - Constant = true; - } // end of CONSTANT constructor - -/***********************************************************************/ -/* CONSTANT public constructor. */ -/***********************************************************************/ -CONSTANT::CONSTANT(PGLOBAL g, int n) - { - if (!(Value = AllocateValue(g, &n, TYPE_INT))) - longjmp(g->jumper[g->jump_level], TYPE_CONST); - - Constant = true; - } // end of CONSTANT constructor - -/***********************************************************************/ -/* GetLengthEx: returns an evaluation of the constant string length. */ -/* Note: When converting from token to string, length has to be */ -/* specified but we need the domain length, not the value length. */ -/***********************************************************************/ -int CONSTANT::GetLengthEx(void) - { - return Value->GetValLen(); - } // end of GetLengthEx - -/***********************************************************************/ -/* Compare: returns true if this object is equivalent to xp. */ -/***********************************************************************/ -bool CONSTANT::Compare(PXOB xp) - { - if (this == xp) - return true; - else if (xp->GetType() != TYPE_CONST) - return false; - else - return Value->IsEqual(xp->GetValue(), true); - - } // end of Compare - -#if 0 -/***********************************************************************/ -/* Rephrase: temporary implementation used by PlugRephraseSQL. 
*/ -/***********************************************************************/ -bool CONSTANT::Rephrase(PGLOBAL g, PSZ work) - { - switch (Value->GetType()) { - case TYPE_STRING: - sprintf(work + strlen(work), "'%s'", Value->GetCharValue()); - break; - case TYPE_SHORT: - sprintf(work + strlen(work), "%hd", Value->GetShortValue()); - break; - case TYPE_INT: - case TYPE_DATE: - sprintf(work + strlen(work), "%d", Value->GetIntValue()); - break; - case TYPE_DOUBLE: - sprintf(work + strlen(work), "%lf", Value->GetFloatValue()); - break; - case TYPE_BIGINT: - sprintf(work + strlen(work), "%lld", Value->GetBigintValue()); - break; - case TYPE_TINY: - sprintf(work + strlen(work), "%d", Value->GetTinyValue()); - break; - default: - sprintf(g->Message, MSG(BAD_CONST_TYPE), Value->GetType()); - return false; - } // endswitch - - return false; - } // end of Rephrase -#endif // 0 - -/***********************************************************************/ -/* Make file output of a constant object. */ -/***********************************************************************/ -void CONSTANT::Print(PGLOBAL g, FILE *f, uint n) - { - Value->Print(g, f, n); - } /* end of Print */ - -/***********************************************************************/ -/* Make string output of a constant object. */ -/***********************************************************************/ -void CONSTANT::Print(PGLOBAL g, char *ps, uint z) - { - Value->Print(g, ps, z); - } /* end of Print */ +/************ Xobject C++ Functions Source Code File (.CPP) ************/
+/* Name: XOBJECT.CPP Version 2.3 */
+/* */
+/* (C) Copyright to the author Olivier BERTRAND 1998-2014 */
+/* */
+/* This file contains base XOBJECT class functions. */
+/* Also here is the implementation of the CONSTANT class. */
+/***********************************************************************/
+
+/***********************************************************************/
+/* Include mariaDB header file. */
+/***********************************************************************/
+#include "my_global.h"
+
+/***********************************************************************/
+/* Include required application header files */
+/* global.h is header containing all global Plug declarations. */
+/* plgdbsem.h is header containing the DB applic. declarations. */
+/***********************************************************************/
+#include "global.h"
+#include "plgdbsem.h"
+#include "xobject.h"
+
+/***********************************************************************/
+/* Macro definitions. */
+/***********************************************************************/
+#if defined(_DEBUG) || defined(DEBTRACE)
+#define ASSERT(B) assert(B);
+#else
+#define ASSERT(B)
+#endif
+
+/***********************************************************************/
+/* The one and only needed void object. */
+/***********************************************************************/
+XVOID Xvoid;
+PXOB const pXVOID = &Xvoid; // Pointer used by other classes
+
+/* ------------------------- Class XOBJECT --------------------------- */
+
+/***********************************************************************/
+/* GetCharValue: returns the Result value as a char string. */
+/* Using GetCharValue provides no conversion from numeric types. */
+/***********************************************************************/
+PSZ XOBJECT::GetCharValue(void)
+ {
+ ASSERT(Value)
+ return Value->GetCharValue();
+ } // end of GetCharValue()
+
+/***********************************************************************/
+/* GetShortValue: returns the Result value as a short integer. */
+/***********************************************************************/
+short XOBJECT::GetShortValue(void)
+ {
+ ASSERT(Value)
+ return Value->GetShortValue();
+ } // end of GetShortValue
+
+/***********************************************************************/
+/* GetIntValue: returns the Result value as a int integer. */
+/* Delegates to the held Value object; Value is only NULL-checked in */
+/* debug builds (see ASSERT above). */
+/***********************************************************************/
+int XOBJECT::GetIntValue(void)
+ {
+ ASSERT(Value)
+ return Value->GetIntValue();
+ } // end of GetIntValue
+
+/***********************************************************************/
+/* GetFloatValue: returns the Result value as a double float. */
+/* Delegates to the held Value object; Value is only NULL-checked in */
+/* debug builds (see ASSERT above). */
+/***********************************************************************/
+double XOBJECT::GetFloatValue(void)
+ {
+ ASSERT(Value)
+ return Value->GetFloatValue();
+ } // end of GetFloatValue
+
+/* ------------------------- Class CONSTANT -------------------------- */
+
+/***********************************************************************/
+/* CONSTANT public constructor. */
+/* Wraps a raw value of the given type in a newly allocated VALUE. */
+/* On allocation failure there is no return path: control longjmps */
+/* back to the global jump buffer with code TYPE_CONST, so callers */
+/* never see a partially constructed object. */
+/***********************************************************************/
+CONSTANT::CONSTANT(PGLOBAL g, void *value, short type)
+ {
+ if (!(Value = AllocateValue(g, value, (int)type)))
+ longjmp(g->jumper[g->jump_level], TYPE_CONST);
+
+ Constant = true;
+ } // end of CONSTANT constructor
+
+/***********************************************************************/
+/* CONSTANT public constructor. */
+/* Convenience overload for an integer constant: allocates a */
+/* TYPE_INT VALUE from the copy of n. Error handling is via longjmp */
+/* (code TYPE_CONST), as in the general constructor above. */
+/***********************************************************************/
+CONSTANT::CONSTANT(PGLOBAL g, int n)
+ {
+ if (!(Value = AllocateValue(g, &n, TYPE_INT)))
+ longjmp(g->jumper[g->jump_level], TYPE_CONST);
+
+ Constant = true;
+ } // end of CONSTANT constructor
+
+/***********************************************************************/
+/* GetLengthEx: returns an evaluation of the constant string length. */
+/* Note: When converting from token to string, length has to be */
+/* specified but we need the domain length, not the value length. */
+/* Currently simply forwards to Value->GetValLen(). */
+/***********************************************************************/
+int CONSTANT::GetLengthEx(void)
+ {
+ return Value->GetValLen();
+ } // end of GetLengthEx
+
+/***********************************************************************/
+/* Convert a constant to the given type. */
+/* No-op when the value already has type newtype; otherwise a new */
+/* VALUE of the target type is allocated from the old one and */
+/* replaces it. Allocation failure longjmps with code TYPE_CONST */
+/* instead of returning. */
+/***********************************************************************/
+void CONSTANT::Convert(PGLOBAL g, int newtype)
+ {
+ if (Value->GetType() != newtype)
+ if (!(Value = AllocateValue(g, Value, newtype)))
+ longjmp(g->jumper[g->jump_level], TYPE_CONST);
+
+ } // end of Convert
+
+/***********************************************************************/
+/* Compare: returns true if this object is equivalent to xp. */
+/* Identity short-circuits to true; a non-CONSTANT xp is never equal; */
+/* otherwise equality is delegated to Value->IsEqual with its second */
+/* argument true (exact comparison mode -- semantics defined by the */
+/* VALUE class, see value.h). */
+/***********************************************************************/
+bool CONSTANT::Compare(PXOB xp)
+ {
+ if (this == xp)
+ return true;
+ else if (xp->GetType() != TYPE_CONST)
+ return false;
+ else
+ return Value->IsEqual(xp->GetValue(), true);
+
+ } // end of Compare
+
+#if 0
+/***********************************************************************/
+/* Rephrase: temporary implementation used by PlugRephraseSQL. */
+/* Appends the constant's SQL text representation to the work buffer. */
+/* NOTE(review): the success path at the bottom returns false, same */
+/* as... actually the BAD_CONST_TYPE error path also ends in a */
+/* return false -- looks like the final return was meant to be true */
+/* (or false means "no error" here); TODO confirm against callers */
+/* before ever re-enabling this #if 0 block. */
+/***********************************************************************/
+bool CONSTANT::Rephrase(PGLOBAL g, PSZ work)
+ {
+ switch (Value->GetType()) {
+ case TYPE_STRING:
+ sprintf(work + strlen(work), "'%s'", Value->GetCharValue());
+ break;
+ case TYPE_SHORT:
+ sprintf(work + strlen(work), "%hd", Value->GetShortValue());
+ break;
+ case TYPE_INT:
+ case TYPE_DATE:
+ sprintf(work + strlen(work), "%d", Value->GetIntValue());
+ break;
+ case TYPE_DOUBLE:
+ sprintf(work + strlen(work), "%lf", Value->GetFloatValue());
+ break;
+ case TYPE_BIGINT:
+ sprintf(work + strlen(work), "%lld", Value->GetBigintValue());
+ break;
+ case TYPE_TINY:
+ sprintf(work + strlen(work), "%d", Value->GetTinyValue());
+ break;
+ default:
+ sprintf(g->Message, MSG(BAD_CONST_TYPE), Value->GetType());
+ return false;
+ } // endswitch
+
+ return false;
+ } // end of Rephrase
+#endif // 0
+
+/***********************************************************************/
+/* Make file output of a constant object. */
+/* Forwards to the VALUE's file-Print with the same indent level n. */
+/***********************************************************************/
+void CONSTANT::Print(PGLOBAL g, FILE *f, uint n)
+ {
+ Value->Print(g, f, n);
+ } /* end of Print */
+
+/***********************************************************************/
+/* Make string output of a constant object. */
+/* Forwards to the VALUE's string-Print; z is the buffer size limit. */
+/***********************************************************************/
+void CONSTANT::Print(PGLOBAL g, char *ps, uint z)
+ {
+ Value->Print(g, ps, z);
+ } /* end of Print */
diff --git a/storage/connect/xobject.h b/storage/connect/xobject.h index 15ba6d99f33..1621b4e82ff 100644 --- a/storage/connect/xobject.h +++ b/storage/connect/xobject.h @@ -1,118 +1,119 @@ -/*************** Xobject H Declares Source Code File (.H) **************/ -/* Name: XOBJECT.H Version 2.3 */ -/* */ -/* (C) Copyright to the author Olivier BERTRAND 1998-2012 */ -/* */ -/* This file contains the XOBJECT and derived classes declares. */ -/***********************************************************************/ - -#ifndef __XOBJECT__H -#define __XOBJECT__H - -/***********************************************************************/ -/* Include required application header files */ -/* block.h is header containing Block global declarations. */ -/***********************************************************************/ -#include "block.h" -#include "valblk.h" // includes value.h - -/***********************************************************************/ -/* Types used in some class definitions. */ -/***********************************************************************/ -//typedef struct _tabdesc *PTABD; // For friend setting - -/***********************************************************************/ -/* The pointer to the one and only needed void object. */ -/***********************************************************************/ -extern PXOB const pXVOID; - -/***********************************************************************/ -/* Class XOBJECT is the base class for all classes that can be used */ -/* in evaluation operations: FILTER, EXPRESSION, SCALF, FNC, COLBLK, */ -/* SELECT, FILTER as well as all the constant object types. 
*/ -/***********************************************************************/ -class DllExport XOBJECT : public BLOCK { - public: - XOBJECT(void) {Value = NULL; Constant = false;} - - // Implementation - PVAL GetValue(void) {return Value;} - bool IsConstant(void) {return Constant;} - virtual int GetType(void) {return TYPE_XOBJECT;} - virtual int GetResultType(void) {return TYPE_VOID;} - virtual int GetKey(void) {return 0;} -#if defined(_DEBUG) - virtual void SetKey(int k) {assert(false);} -#else // !_DEBUG - virtual void SetKey(int k) {} // Only defined for COLBLK -#endif // !_DEBUG - virtual int GetLength(void) = 0; - virtual int GetLengthEx(void) = 0; - virtual PSZ GetCharValue(void); - virtual short GetShortValue(void); - virtual int GetIntValue(void); - virtual double GetFloatValue(void); - virtual int GetScale(void) = 0; - - // Methods - virtual void Reset(void) {} - virtual bool Compare(PXOB) = 0; - virtual bool Init(PGLOBAL) {return false;} - virtual bool Eval(PGLOBAL) {return false;} - virtual bool SetFormat(PGLOBAL, FORMAT&) = 0; - - protected: - PVAL Value; // The current value of the object. - bool Constant; // true for an object having a constant value. - }; // end of class XOBJECT - -/***********************************************************************/ -/* Class XVOID: represent a void (null) object. */ -/* Used to represent a void parameter for count(*) or for a filter. 
*/ -/***********************************************************************/ -class DllExport XVOID : public XOBJECT { - public: - XVOID(void) {Constant = true;} - - // Implementation - virtual int GetType(void) {return TYPE_VOID;} - virtual int GetLength(void) {return 0;} - virtual int GetLengthEx(void) {return 0;} - virtual PSZ GetCharValue(void) {return NULL;} - virtual int GetIntValue(void) {return 0;} - virtual double GetFloatValue(void) {return 0.0;} - virtual int GetScale() {return 0;} - - // Methods - virtual bool Compare(PXOB xp) {return xp->GetType() == TYPE_VOID;} - virtual bool SetFormat(PGLOBAL, FORMAT&) {return true;} - }; // end of class XVOID - - -/***********************************************************************/ -/* Class CONSTANT: represents a constant XOBJECT of any value type. */ -/* Note that the CONSTANT class is a friend of the VALUE class; */ -/***********************************************************************/ -class DllExport CONSTANT : public XOBJECT { - public: - CONSTANT(PGLOBAL g, void *value, short type); - CONSTANT(PGLOBAL g, int n); - CONSTANT(PVAL valp) {Value = valp; Constant = true;} - - // Implementation - virtual int GetType(void) {return TYPE_CONST;} - virtual int GetResultType(void) {return Value->Type;} - virtual int GetLength(void) {return Value->GetValLen();} - virtual int GetScale() {return Value->GetValPrec();} - virtual int GetLengthEx(void); - - // Methods - virtual bool Compare(PXOB xp); - virtual bool SetFormat(PGLOBAL g, FORMAT& fmt) - {return Value->SetConstFormat(g, fmt);} - void SetValue(PVAL vp) {Value = vp;} - virtual void Print(PGLOBAL g, FILE *, uint); - virtual void Print(PGLOBAL g, char *, uint); - }; // end of class CONSTANT - -#endif +/*************** Xobject H Declares Source Code File (.H) **************/
+/* Name: XOBJECT.H Version 2.4 */
+/* */
+/* (C) Copyright to the author Olivier BERTRAND 1998-2014 */
+/* */
+/* This file contains the XOBJECT and derived classes declares. */
+/***********************************************************************/
+
+#ifndef __XOBJECT__H
+#define __XOBJECT__H
+
+/***********************************************************************/
+/* Include required application header files */
+/* block.h is header containing Block global declarations. */
+/***********************************************************************/
+#include "block.h"
+#include "valblk.h" // includes value.h
+
+/***********************************************************************/
+/* Types used in some class definitions. */
+/***********************************************************************/
+//typedef struct _tabdesc *PTABD; // For friend setting
+
+/***********************************************************************/
+/* The pointer to the one and only needed void object. */
+/***********************************************************************/
+extern PXOB const pXVOID;
+
+/***********************************************************************/
+/* Class XOBJECT is the base class for all classes that can be used */
+/* in evaluation operations: FILTER, EXPRESSION, SCALF, FNC, COLBLK, */
+/* SELECT, FILTER as well as all the constant object types. */
+/* Base defaults: GetType()=TYPE_XOBJECT, GetResultType()=TYPE_VOID, */
+/* GetKey()=0, Reset() is a no-op, and Init()/Eval() return false */
+/* (presumably "no error" by CONNECT convention -- TODO confirm). */
+/***********************************************************************/
+class DllExport XOBJECT : public BLOCK {
+ public:
+ XOBJECT(void) {Value = NULL; Constant = false;}
+
+ // Implementation
+ PVAL GetValue(void) {return Value;}
+ bool IsConstant(void) {return Constant;}
+ virtual int GetType(void) {return TYPE_XOBJECT;}
+ virtual int GetResultType(void) {return TYPE_VOID;}
+ virtual int GetKey(void) {return 0;}
+#if defined(_DEBUG)
+ virtual void SetKey(int k) {assert(false);} // trap: not valid on base
+#else // !_DEBUG
+ virtual void SetKey(int k) {} // Only defined for COLBLK
+#endif // !_DEBUG
+ virtual int GetLength(void) = 0;
+ virtual int GetLengthEx(void) = 0;
+ virtual PSZ GetCharValue(void); // see xobject.cpp: asserts Value in debug
+ virtual short GetShortValue(void);
+ virtual int GetIntValue(void);
+ virtual double GetFloatValue(void);
+ virtual int GetScale(void) = 0;
+
+ // Methods
+ virtual void Reset(void) {}
+ virtual bool Compare(PXOB) = 0;
+ virtual bool Init(PGLOBAL) {return false;}
+ virtual bool Eval(PGLOBAL) {return false;}
+ virtual bool SetFormat(PGLOBAL, FORMAT&) = 0;
+
+ protected:
+ PVAL Value; // The current value of the object.
+ bool Constant; // true for an object having a constant value.
+ }; // end of class XOBJECT
+
+/***********************************************************************/
+/* Class XVOID: represent a void (null) object. */
+/* Used to represent a void parameter for count(*) or for a filter. */
+/* A single shared instance (Xvoid / pXVOID) is defined in */
+/* xobject.cpp; all accessors return neutral values (NULL, 0, 0.0). */
+/***********************************************************************/
+class DllExport XVOID : public XOBJECT {
+ public:
+ XVOID(void) {Constant = true;}
+
+ // Implementation
+ virtual int GetType(void) {return TYPE_VOID;}
+ virtual int GetLength(void) {return 0;}
+ virtual int GetLengthEx(void) {return 0;}
+ virtual PSZ GetCharValue(void) {return NULL;}
+ virtual int GetIntValue(void) {return 0;}
+ virtual double GetFloatValue(void) {return 0.0;}
+ virtual int GetScale() {return 0;}
+
+ // Methods
+ virtual bool Compare(PXOB xp) {return xp->GetType() == TYPE_VOID;} // any void equals any void
+ virtual bool SetFormat(PGLOBAL, FORMAT&) {return true;}
+ }; // end of class XVOID
+
+
+/***********************************************************************/
+/* Class CONSTANT: represents a constant XOBJECT of any value type. */
+/* Note that the CONSTANT class is a friend of the VALUE class; */
+/* The PGLOBAL constructors allocate a new VALUE and longjmp with */
+/* code TYPE_CONST on failure; the PVAL constructor adopts valp */
+/* without copying (caller keeps ownership semantics -- TODO confirm).*/
+/***********************************************************************/
+class DllExport CONSTANT : public XOBJECT {
+ public:
+ CONSTANT(PGLOBAL g, void *value, short type);
+ CONSTANT(PGLOBAL g, int n);
+ CONSTANT(PVAL valp) {Value = valp; Constant = true;}
+
+ // Implementation
+ virtual int GetType(void) {return TYPE_CONST;}
+ virtual int GetResultType(void) {return Value->Type;}
+ virtual int GetLength(void) {return Value->GetValLen();}
+ virtual int GetScale() {return Value->GetValPrec();}
+ virtual int GetLengthEx(void);
+
+ // Methods
+ virtual bool Compare(PXOB xp);
+ virtual bool SetFormat(PGLOBAL g, FORMAT& fmt)
+ {return Value->SetConstFormat(g, fmt);}
+ void Convert(PGLOBAL g, int newtype); // in-place type conversion, may longjmp
+ void SetValue(PVAL vp) {Value = vp;} // replaces held value, no ownership transfer visible here
+ virtual void Print(PGLOBAL g, FILE *, uint);
+ virtual void Print(PGLOBAL g, char *, uint);
+ }; // end of class CONSTANT
+
+#endif
diff --git a/storage/connect/xtable.h b/storage/connect/xtable.h index 1f937bba6c1..2d95acdb6d4 100644 --- a/storage/connect/xtable.h +++ b/storage/connect/xtable.h @@ -62,6 +62,8 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block. inline PCOL GetColumns(void) {return Columns;} inline int GetDegree(void) {return Degree;} inline MODE GetMode(void) {return Mode;} + inline PFIL GetFilter(void) {return To_Filter;} + inline void SetFilter(PFIL fp) {To_Filter = fp;} inline void SetOrig(PTDB txp) {To_Orig = txp;} inline void SetUse(TUSE n) {Use = n;} inline void SetCondFil(PCFIL cfp) {To_CondFil = cfp;} @@ -72,16 +74,17 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block. inline void SetDegree(int degree) {Degree = degree;} inline void SetMode(MODE mode) {Mode = mode;} - //Properties + // Properties virtual AMT GetAmType(void) {return TYPE_AM_ERROR;} virtual int GetTdb_No(void) {return Tdb_No;} virtual PTDB GetNext(void) {return Next;} virtual PCATLG GetCat(void) {return NULL;} + virtual void SetAbort(bool b) {;} // Methods virtual bool IsSame(PTDB tp) {return tp == this;} virtual bool GetBlockValues(PGLOBAL g) {return false;} - virtual int Cardinality(PGLOBAL g) {return (g) ? -1 : 0;} + virtual int Cardinality(PGLOBAL g) {return 0;} virtual int GetMaxSize(PGLOBAL) = 0; virtual int GetProgMax(PGLOBAL) = 0; virtual int GetProgCur(void) = 0; @@ -91,7 +94,7 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block. virtual PTDB Duplicate(PGLOBAL g) {return NULL;} virtual PTDB CopyOne(PTABS t) {return this;} virtual PTDB Copy(PTABS t); - virtual void PrintAM(FILE *f, char *m) + virtual void PrintAM(FILE *f, char *m) {fprintf(f, "%s AM(%d)\n", m, GetAmType());} virtual void Print(PGLOBAL g, FILE *f, uint n); virtual void Print(PGLOBAL g, char *ps, uint z); @@ -113,6 +116,7 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block. 
// Members PTDB To_Orig; // Pointer to original if it is a copy TUSE Use; + PFIL To_Filter; PCFIL To_CondFil; // To condition filter structure static int Tnum; // Used to generate Tdb_no's const int Tdb_No; // GetTdb_No() is always 0 for OPJOIN @@ -122,6 +126,7 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block. PCOL Columns; // Points to the first column of the table MODE Mode; // 10 Read, 30 Update, 40 Insert, 50 Delete int Degree; // Number of columns + int Cardinal; // Table number of rows }; // end of class TDB /***********************************************************************/ @@ -142,6 +147,7 @@ class DllExport TDBASE : public TDB { inline PKXBASE GetKindex(void) {return To_Kindex;} inline PCOL GetSetCols(void) {return To_SetCols;} inline void SetSetCols(PCOL colp) {To_SetCols = colp;} + inline void SetXdp(PIXDEF xdp) {To_Xdp = xdp;} inline void SetKindex(PKXBASE kxp) {To_Kindex = kxp;} // Properties @@ -154,7 +160,7 @@ class DllExport TDBASE : public TDB { virtual PSZ GetPath(void); virtual void PrintAM(FILE *f, char *m); virtual RECFM GetFtype(void) {return RECFM_NAF;} - virtual int GetAffectedRows(void) {return -1;} +//virtual int GetAffectedRows(void) {return -1;} virtual int GetRecpos(void) = 0; virtual bool SetRecpos(PGLOBAL g, int recpos); virtual bool IsReadOnly(void) {return Read_Only;} @@ -168,7 +174,7 @@ class DllExport TDBASE : public TDB { virtual void ResetDB(void) {} virtual void ResetSize(void) {MaxSize = -1;} virtual void RestoreNrec(void) {} - virtual int ResetTableOpt(PGLOBAL g, bool dox); + virtual int ResetTableOpt(PGLOBAL g, bool dop, bool dox); virtual PSZ GetServer(void) {return "Current";} // Database routines @@ -181,8 +187,7 @@ class DllExport TDBASE : public TDB { virtual int MakeIndex(PGLOBAL g, PIXDEF pxdf, bool add) {strcpy(g->Message, "Remote index"); return RC_INFO;} virtual bool ReadKey(PGLOBAL g, OPVAL op, const void *key, int len) - {assert(false); return true;} - + {assert(false); return true;} 
protected: // Members @@ -190,8 +195,10 @@ class DllExport TDBASE : public TDB { PXOB *To_Link; // Points to column of previous relations PCOL *To_Key_Col; // Points to key columns in current file PKXBASE To_Kindex; // Points to table key index + PIXDEF To_Xdp; // To the index definition block PCOL To_SetCols; // Points to updated columns - int MaxSize; // Max size in number of lines + RECFM Ftype; // File type: 0-var 1-fixed 2-binary (VCT) + int MaxSize; // Max size in number of lines int Knum; // Size of key arrays bool Read_Only; // True for read only tables const CHARSET_INFO *m_data_charset; @@ -213,7 +220,7 @@ class DllExport TDBCAT : public TDBASE { virtual int GetRecpos(void) {return N;} virtual int GetProgCur(void) {return N;} virtual int RowNumber(PGLOBAL g, bool b = false) {return N + 1;} - virtual bool SetRecpos(PGLOBAL g, int recpos); + virtual bool SetRecpos(PGLOBAL g, int recpos); // Database routines virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); @@ -231,9 +238,9 @@ class DllExport TDBCAT : public TDBASE { bool InitCol(PGLOBAL g); // Members - PQRYRES Qrp; + PQRYRES Qrp; int N; // Row number - bool Init; + bool Init; }; // end of class TDBCAT /***********************************************************************/ diff --git a/storage/heap/hp_block.c b/storage/heap/hp_block.c index 1c40f982422..aa5343a0717 100644 --- a/storage/heap/hp_block.c +++ b/storage/heap/hp_block.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -76,8 +76,8 @@ int hp_get_new_block(HP_SHARE *info, HP_BLOCK *block, size_t *alloc_length) When level 1 is full, we allocate data for HPTRS_IN_NODE at level 2 and 1 + X rows at level 0. */ - *alloc_length= (sizeof(HP_PTRS)* ((i == block->levels) ? 
i : i - 1) + - block->records_in_block* block->recbuffer); + *alloc_length= (sizeof(HP_PTRS) * ((i == block->levels) ? i : i - 1) + + (ulonglong)block->records_in_block * block->recbuffer); if (!(root=(HP_PTRS*) my_malloc(*alloc_length, MYF(MY_WME | (info->internal ? diff --git a/storage/heap/hp_create.c b/storage/heap/hp_create.c index 30831f229ac..a68a35e63fb 100644 --- a/storage/heap/hp_create.c +++ b/storage/heap/hp_create.c @@ -1,4 +1,5 @@ -/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. + Copyright (c) 2010, 2014, SkySQL Ab. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc index 1d2f313a07c..34a72f360be 100644 --- a/storage/innobase/btr/btr0cur.cc +++ b/storage/innobase/btr/btr0cur.cc @@ -202,15 +202,6 @@ btr_rec_free_externally_stored_fields( mtr_t* mtr); /*!< in: mini-transaction handle which contains an X-latch to record page and to the index tree */ -/***********************************************************//** -Gets the externally stored size of a record, in units of a database page. 
-@return externally stored part, in units of a database page */ -static -ulint -btr_rec_get_externally_stored_len( -/*==============================*/ - const rec_t* rec, /*!< in: record */ - const ulint* offsets);/*!< in: array returned by rec_get_offsets() */ #endif /* !UNIV_HOTBACKUP */ /******************************************************//** @@ -271,6 +262,7 @@ btr_cur_latch_leaves( case BTR_MODIFY_TREE: /* x-latch also brothers from left to right */ left_page_no = btr_page_get_prev(page, mtr); + mode = latch_mode; if (left_page_no != FIL_NULL) { get_block = btr_block_get( @@ -4043,15 +4035,15 @@ btr_rec_get_field_ref_offs( #define btr_rec_get_field_ref(rec, offsets, n) \ ((rec) + btr_rec_get_field_ref_offs(offsets, n)) -/***********************************************************//** -Gets the externally stored size of a record, in units of a database page. +/** Gets the externally stored size of a record, in units of a database page. +@param[in] rec record +@param[in] offsets array returned by rec_get_offsets() @return externally stored part, in units of a database page */ -static + ulint btr_rec_get_externally_stored_len( -/*==============================*/ - const rec_t* rec, /*!< in: record */ - const ulint* offsets)/*!< in: array returned by rec_get_offsets() */ + const rec_t* rec, + const ulint* offsets) { ulint n_fields; ulint total_extern_len = 0; diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc index 3cce75abe74..fa2edb90b8e 100644 --- a/storage/innobase/buf/buf0flu.cc +++ b/storage/innobase/buf/buf0flu.cc @@ -2183,6 +2183,10 @@ af_get_pct_for_dirty() { ulint dirty_pct = buf_get_modified_ratio_pct(); + if (dirty_pct > 0 && srv_max_buf_pool_modified_pct == 0) { + return(100); + } + ut_a(srv_max_dirty_pages_pct_lwm <= srv_max_buf_pool_modified_pct); diff --git a/storage/innobase/buf/buf0lru.cc b/storage/innobase/buf/buf0lru.cc index ec30c063a72..64409e1993d 100644 --- a/storage/innobase/buf/buf0lru.cc +++ 
b/storage/innobase/buf/buf0lru.cc @@ -2263,6 +2263,24 @@ buf_LRU_block_remove_hashed( " in the hash table\n", (ulong) bpage->space, (ulong) bpage->offset); +#ifdef UNIV_DEBUG + fprintf(stderr, + "InnoDB: in_page_hash %lu in_zip_hash %lu\n" + " in_free_list %lu in_flush_list %lu in_LRU_list %lu\n" + " zip.data %p zip_size %lu page_state %d\n", + bpage->in_page_hash, bpage->in_zip_hash, + bpage->in_free_list, bpage->in_flush_list, + bpage->in_LRU_list, bpage->zip.data, + buf_page_get_zip_size(bpage), + buf_page_get_state(bpage)); +#else + fprintf(stderr, + "InnoDB: zip.data %p zip_size %lu page_state %d\n", + bpage->zip.data, + buf_page_get_zip_size(bpage), + buf_page_get_state(bpage)); +#endif + if (hashed_bpage) { fprintf(stderr, "InnoDB: In hash table we find block" diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc index 86a903d925e..c53f7e82f58 100644 --- a/storage/innobase/dict/dict0dict.cc +++ b/storage/innobase/dict/dict0dict.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. 
This program is free software; you can redistribute it and/or modify it under @@ -50,6 +50,7 @@ UNIV_INTERN dict_index_t* dict_ind_compact; #include "btr0btr.h" #include "btr0cur.h" #include "btr0sea.h" +#include "os0once.h" #include "page0zip.h" #include "page0page.h" #include "pars0pars.h" @@ -102,7 +103,7 @@ UNIV_INTERN ulong zip_pad_max = 50; UNIV_INTERN mysql_pfs_key_t dict_operation_lock_key; UNIV_INTERN mysql_pfs_key_t index_tree_rw_lock_key; UNIV_INTERN mysql_pfs_key_t index_online_log_key; -UNIV_INTERN mysql_pfs_key_t dict_table_stats_latch_key; +UNIV_INTERN mysql_pfs_key_t dict_table_stats_key; #endif /* UNIV_PFS_RWLOCK */ #ifdef UNIV_PFS_MUTEX @@ -121,6 +122,11 @@ UNIV_INTERN mysql_pfs_key_t dict_foreign_err_mutex_key; /** Identifies generated InnoDB foreign key names */ static char dict_ibfk[] = "_ibfk_"; +bool innodb_table_stats_not_found = false; +bool innodb_index_stats_not_found = false; +static bool innodb_table_stats_not_found_reported = false; +static bool innodb_index_stats_not_found_reported = false; + /*******************************************************************//** Tries to find column names for the index and sets the col field of the index. @@ -319,6 +325,82 @@ dict_mutex_exit_for_mysql(void) mutex_exit(&(dict_sys->mutex)); } +/** Allocate and init a dict_table_t's stats latch. +This function must not be called concurrently on the same table object. +@param[in,out] table_void table whose stats latch to create */ +static +void +dict_table_stats_latch_alloc( + void* table_void) +{ + dict_table_t* table = static_cast<dict_table_t*>(table_void); + + table->stats_latch = new(std::nothrow) rw_lock_t; + + ut_a(table->stats_latch != NULL); + + rw_lock_create(dict_table_stats_key, table->stats_latch, + SYNC_INDEX_TREE); +} + +/** Deinit and free a dict_table_t's stats latch. +This function must not be called concurrently on the same table object. 
+@param[in,out] table table whose stats latch to free */ +static +void +dict_table_stats_latch_free( + dict_table_t* table) +{ + rw_lock_free(table->stats_latch); + delete table->stats_latch; +} + +/** Create a dict_table_t's stats latch or delay for lazy creation. +This function is only called from either single threaded environment +or from a thread that has not shared the table object with other threads. +@param[in,out] table table whose stats latch to create +@param[in] enabled if false then the latch is disabled +and dict_table_stats_lock()/unlock() become noop on this table. */ + +void +dict_table_stats_latch_create( + dict_table_t* table, + bool enabled) +{ + if (!enabled) { + table->stats_latch = NULL; + table->stats_latch_created = os_once::DONE; + return; + } + +#ifdef HAVE_ATOMIC_BUILTINS + /* We create this lazily the first time it is used. */ + table->stats_latch = NULL; + table->stats_latch_created = os_once::NEVER_DONE; +#else /* HAVE_ATOMIC_BUILTINS */ + + dict_table_stats_latch_alloc(table); + + table->stats_latch_created = os_once::DONE; +#endif /* HAVE_ATOMIC_BUILTINS */ +} + +/** Destroy a dict_table_t's stats latch. +This function is only called from either single threaded environment +or from a thread that has not shared the table object with other threads. +@param[in,out] table table whose stats latch to destroy */ + +void +dict_table_stats_latch_destroy( + dict_table_t* table) +{ + if (table->stats_latch_created == os_once::DONE + && table->stats_latch != NULL) { + + dict_table_stats_latch_free(table); + } +} + /**********************************************************************//** Lock the appropriate latch to protect a given table's statistics. 
*/ UNIV_INTERN @@ -331,6 +413,14 @@ dict_table_stats_lock( ut_ad(table != NULL); ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); +#ifdef HAVE_ATOMIC_BUILTINS + os_once::do_or_wait_for_done( + &table->stats_latch_created, + dict_table_stats_latch_alloc, table); +#else /* HAVE_ATOMIC_BUILTINS */ + ut_ad(table->stats_latch_created == os_once::DONE); +#endif /* HAVE_ATOMIC_BUILTINS */ + if (table->stats_latch == NULL) { /* This is a dummy table object that is private in the current thread and is not shared between multiple threads, thus we @@ -5212,8 +5302,6 @@ dict_table_print( index = UT_LIST_GET_NEXT(indexes, index); } - table->stat_initialized = FALSE; - dict_table_stats_unlock(table, RW_X_LATCH); foreign = UT_LIST_GET_FIRST(table->foreign_list); @@ -6016,14 +6104,34 @@ dict_table_schema_check( table = dict_table_get_low(req_schema->table_name); if (table == NULL) { + bool should_print=true; /* no such table */ - ut_snprintf(errstr, errstr_sz, - "Table %s not found.", - ut_format_name(req_schema->table_name, - TRUE, buf, sizeof(buf))); + if (innobase_strcasecmp(req_schema->table_name, "mysql/innodb_table_stats") == 0) { + if (innodb_table_stats_not_found_reported == false) { + innodb_table_stats_not_found = true; + innodb_table_stats_not_found_reported = true; + } else { + should_print = false; + } + } else if (innobase_strcasecmp(req_schema->table_name, "mysql/innodb_index_stats") == 0 ) { + if (innodb_index_stats_not_found_reported == false) { + innodb_index_stats_not_found = true; + innodb_index_stats_not_found_reported = true; + } else { + should_print = false; + } + } - return(DB_TABLE_NOT_FOUND); + if (should_print) { + ut_snprintf(errstr, errstr_sz, + "Table %s not found.", + ut_format_name(req_schema->table_name, + TRUE, buf, sizeof(buf))); + return(DB_TABLE_NOT_FOUND); + } else { + return(DB_STATS_DO_NOT_EXIST); + } } if (table->ibd_file_missing) { diff --git a/storage/innobase/dict/dict0mem.cc b/storage/innobase/dict/dict0mem.cc index 
60daeea3a96..6310b2fd225 100644 --- a/storage/innobase/dict/dict0mem.cc +++ b/storage/innobase/dict/dict0mem.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. This program is free software; you can redistribute it and/or modify it under @@ -95,9 +95,9 @@ dict_mem_table_create( ut_d(table->magic_n = DICT_TABLE_MAGIC_N); - table->stats_latch = new rw_lock_t; - rw_lock_create(dict_table_stats_latch_key, table->stats_latch, - SYNC_INDEX_TREE); + /* true means that the stats latch will be enabled - + dict_table_stats_lock() will not be noop. */ + dict_table_stats_latch_create(table, true); #ifndef UNIV_HOTBACKUP table->autoinc_lock = static_cast<ib_lock_t*>( @@ -154,8 +154,7 @@ dict_mem_table_free( mutex_free(&(table->autoinc_mutex)); #endif /* UNIV_HOTBACKUP */ - rw_lock_free(table->stats_latch); - delete table->stats_latch; + dict_table_stats_latch_destroy(table); ut_free(table->name); mem_heap_free(table->heap); diff --git a/storage/innobase/dict/dict0stats.cc b/storage/innobase/dict/dict0stats.cc index 928bdb3f2ef..1eac9e0df51 100644 --- a/storage/innobase/dict/dict0stats.cc +++ b/storage/innobase/dict/dict0stats.cc @@ -46,6 +46,7 @@ Created Jan 06, 2010 Vasil Dimov #include "ut0rnd.h" /* ut_rnd_interval() */ #include "ut0ut.h" /* ut_format_name(), ut_time() */ +#include <algorithm> #include <map> #include <vector> @@ -127,10 +128,11 @@ where n=1..n_uniq. #endif /* UNIV_STATS_DEBUG */ /* Gets the number of leaf pages to sample in persistent stats estimation */ -#define N_SAMPLE_PAGES(index) \ - ((index)->table->stats_sample_pages != 0 ? \ - (index)->table->stats_sample_pages : \ - srv_stats_persistent_sample_pages) +#define N_SAMPLE_PAGES(index) \ + static_cast<ib_uint64_t>( \ + (index)->table->stats_sample_pages != 0 \ + ? 
(index)->table->stats_sample_pages \ + : srv_stats_persistent_sample_pages) /* number of distinct records on a given level that are required to stop descending to lower levels and fetch N_SAMPLE_PAGES(index) records @@ -268,10 +270,12 @@ dict_stats_persistent_storage_check( mutex_exit(&(dict_sys->mutex)); } - if (ret != DB_SUCCESS) { + if (ret != DB_SUCCESS && ret != DB_STATS_DO_NOT_EXIST) { ut_print_timestamp(stderr); fprintf(stderr, " InnoDB: Error: %s\n", errstr); return(false); + } else if (ret == DB_STATS_DO_NOT_EXIST) { + return false; } /* else */ @@ -430,9 +434,9 @@ dict_stats_table_clone_create( t->corrupted = table->corrupted; /* This private object "t" is not shared with other threads, so - we do not need the stats_latch. The lock/unlock routines will do - nothing if stats_latch is NULL. */ - t->stats_latch = NULL; + we do not need the stats_latch (thus we pass false below). The + dict_table_stats_lock()/unlock() routines will do nothing. */ + dict_table_stats_latch_create(t, false); UT_LIST_INIT(t->indexes); @@ -508,6 +512,7 @@ dict_stats_table_clone_free( /*========================*/ dict_table_t* t) /*!< in: dummy table object to free */ { + dict_table_stats_latch_destroy(t); mem_heap_free(t->heap); } @@ -1283,35 +1288,40 @@ enum page_scan_method_t { }; /* @} */ -/*********************************************************************//** -Scan a page, reading records from left to right and counting the number -of distinct records on that page (looking only at the first n_prefix -columns). If scan_method is QUIT_ON_FIRST_NON_BORING then the function +/** Scan a page, reading records from left to right and counting the number +of distinct records (looking only at the first n_prefix +columns) and the number of external pages pointed by records from this page. 
+If scan_method is QUIT_ON_FIRST_NON_BORING then the function will return as soon as it finds a record that does not match its neighbor to the right, which means that in the case of QUIT_ON_FIRST_NON_BORING the returned n_diff can either be 0 (empty page), 1 (the whole page has all keys equal) or 2 (the function found a non-boring record and returned). +@param[out] out_rec record, or NULL +@param[out] offsets1 rec_get_offsets() working space (must +be big enough) +@param[out] offsets2 rec_get_offsets() working space (must +be big enough) +@param[in] index index of the page +@param[in] page the page to scan +@param[in] n_prefix look at the first n_prefix columns +@param[in] scan_method scan to the end of the page or not +@param[out] n_diff number of distinct records encountered +@param[out] n_external_pages if this is non-NULL then it will be set +to the number of externally stored pages which were encountered @return offsets1 or offsets2 (the offsets of *out_rec), or NULL if the page is empty and does not contain user records. 
*/ -UNIV_INLINE __attribute__((nonnull)) +UNIV_INLINE ulint* dict_stats_scan_page( -/*=================*/ - const rec_t** out_rec, /*!< out: record, or NULL */ - ulint* offsets1, /*!< out: rec_get_offsets() - working space (must be big - enough) */ - ulint* offsets2, /*!< out: rec_get_offsets() - working space (must be big - enough) */ - dict_index_t* index, /*!< in: index of the page */ - const page_t* page, /*!< in: the page to scan */ - ulint n_prefix, /*!< in: look at the first - n_prefix columns */ - page_scan_method_t scan_method, /*!< in: scan to the end of - the page or not */ - ib_uint64_t* n_diff) /*!< out: number of distinct - records encountered */ + const rec_t** out_rec, + ulint* offsets1, + ulint* offsets2, + dict_index_t* index, + const page_t* page, + ulint n_prefix, + page_scan_method_t scan_method, + ib_uint64_t* n_diff, + ib_uint64_t* n_external_pages) { ulint* offsets_rec = offsets1; ulint* offsets_next_rec = offsets2; @@ -1329,6 +1339,12 @@ dict_stats_scan_page( get_next = page_rec_get_next_const; } + const bool should_count_external_pages = n_external_pages != NULL; + + if (should_count_external_pages) { + *n_external_pages = 0; + } + rec = get_next(page_get_infimum_rec(page)); if (page_rec_is_supremum(rec)) { @@ -1341,6 +1357,11 @@ dict_stats_scan_page( offsets_rec = rec_get_offsets(rec, index, offsets_rec, ULINT_UNDEFINED, &heap); + if (should_count_external_pages) { + *n_external_pages += btr_rec_get_externally_stored_len( + rec, offsets_rec); + } + next_rec = get_next(rec); *n_diff = 1; @@ -1391,6 +1412,11 @@ dict_stats_scan_page( offsets_next_rec = offsets_tmp; } + if (should_count_external_pages) { + *n_external_pages += btr_rec_get_externally_stored_len( + rec, offsets_rec); + } + next_rec = get_next(next_rec); } @@ -1401,19 +1427,25 @@ func_exit: return(offsets_rec); } -/*********************************************************************//** -Dive below the current position of a cursor and calculate the number of +/** Dive below the 
current position of a cursor and calculate the number of distinct records on the leaf page, when looking at the fist n_prefix -columns. +columns. Also calculate the number of external pages pointed by records +on the leaf page. +@param[in] cur cursor +@param[in] n_prefix look at the first n_prefix columns +when comparing records +@param[out] n_diff number of distinct records +@param[out] n_external_pages number of external pages +@param[in,out] mtr mini-transaction @return number of distinct records on the leaf page */ static -ib_uint64_t +void dict_stats_analyze_index_below_cur( -/*===============================*/ - const btr_cur_t*cur, /*!< in: cursor */ - ulint n_prefix, /*!< in: look at the first n_prefix - columns when comparing records */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + const btr_cur_t* cur, + ulint n_prefix, + ib_uint64_t* n_diff, + ib_uint64_t* n_external_pages, + mtr_t* mtr) { dict_index_t* index; ulint space; @@ -1426,7 +1458,6 @@ dict_stats_analyze_index_below_cur( ulint* offsets1; ulint* offsets2; ulint* offsets_rec; - ib_uint64_t n_diff; /* the result */ ulint size; index = btr_cur_get_index(cur); @@ -1462,6 +1493,10 @@ dict_stats_analyze_index_below_cur( page_no = btr_node_ptr_get_child_page_no(rec, offsets_rec); + /* assume no external pages by default - in case we quit from this + function without analyzing any leaf pages */ + *n_external_pages = 0; + /* descend to the leaf level on the B-tree */ for (;;) { @@ -1480,20 +1515,24 @@ dict_stats_analyze_index_below_cur( /* search for the first non-boring record on the page */ offsets_rec = dict_stats_scan_page( &rec, offsets1, offsets2, index, page, n_prefix, - QUIT_ON_FIRST_NON_BORING, &n_diff); + QUIT_ON_FIRST_NON_BORING, n_diff, NULL); /* pages on level > 0 are not allowed to be empty */ ut_a(offsets_rec != NULL); /* if page is not empty (offsets_rec != NULL) then n_diff must be > 0, otherwise there is a bug in dict_stats_scan_page() */ - ut_a(n_diff > 0); + ut_a(*n_diff > 0); - if 
(n_diff == 1) { + if (*n_diff == 1) { /* page has all keys equal and the end of the page was reached by dict_stats_scan_page(), no need to descend to the leaf level */ mem_heap_free(heap); - return(1); + /* can't get an estimate for n_external_pages here + because we do not dive to the leaf level, assume no + external pages (*n_external_pages was assigned to 0 + above). */ + return; } /* else */ @@ -1501,7 +1540,7 @@ dict_stats_analyze_index_below_cur( first non-boring record it finds, then the returned n_diff can either be 0 (empty page), 1 (page has all keys equal) or 2 (non-boring record was found) */ - ut_a(n_diff == 2); + ut_a(*n_diff == 2); /* we have a non-boring record in rec, descend below it */ @@ -1512,11 +1551,14 @@ dict_stats_analyze_index_below_cur( ut_ad(btr_page_get_level(page, mtr) == 0); /* scan the leaf page and find the number of distinct keys, - when looking only at the first n_prefix columns */ + when looking only at the first n_prefix columns; also estimate + the number of externally stored pages pointed by records on this + page */ offsets_rec = dict_stats_scan_page( &rec, offsets1, offsets2, index, page, n_prefix, - COUNT_ALL_NON_BORING_AND_SKIP_DEL_MARKED, &n_diff); + COUNT_ALL_NON_BORING_AND_SKIP_DEL_MARKED, n_diff, + n_external_pages); #if 0 DEBUG_PRINTF(" %s(): n_diff below page_no=%lu: " UINT64PF "\n", @@ -1524,133 +1566,146 @@ dict_stats_analyze_index_below_cur( #endif mem_heap_free(heap); - - return(n_diff); } -/*********************************************************************//** -For a given level in an index select N_SAMPLE_PAGES(index) -(or less) records from that level and dive below them to the corresponding -leaf pages, then scan those leaf pages and save the sampling results in -index->stat_n_diff_key_vals[n_prefix - 1] and the number of pages scanned in -index->stat_n_sample_sizes[n_prefix - 1]. 
*/ +/** Input data that is used to calculate dict_index_t::stat_n_diff_key_vals[] +for each n-columns prefix (n from 1 to n_uniq). */ +struct n_diff_data_t { + /** Index of the level on which the descent through the btree + stopped. level 0 is the leaf level. This is >= 1 because we + avoid scanning the leaf level because it may contain too many + pages and doing so is useless when combined with the random dives - + if we are to scan the leaf level, this means a full scan and we can + simply do that instead of fiddling with picking random records higher + in the tree and to dive below them. At the start of the analyzing + we may decide to do full scan of the leaf level, but then this + structure is not used in that code path. */ + ulint level; + + /** Number of records on the level where the descend through the btree + stopped. When we scan the btree from the root, we stop at some mid + level, choose some records from it and dive below them towards a leaf + page to analyze. */ + ib_uint64_t n_recs_on_level; + + /** Number of different key values that were found on the mid level. */ + ib_uint64_t n_diff_on_level; + + /** Number of leaf pages that are analyzed. This is also the same as + the number of records that we pick from the mid level and dive below + them. */ + ib_uint64_t n_leaf_pages_to_analyze; + + /** Cumulative sum of the number of different key values that were + found on all analyzed pages. */ + ib_uint64_t n_diff_all_analyzed_pages; + + /** Cumulative sum of the number of external pages (stored outside of + the btree but in the same file segment). */ + ib_uint64_t n_external_pages_sum; +}; + +/** Estimate the number of different key values in an index when looking at +the first n_prefix columns. For a given level in an index select +n_diff_data->n_leaf_pages_to_analyze records from that level and dive below +them to the corresponding leaf pages, then scan those leaf pages and save the +sampling results in n_diff_data->n_diff_all_analyzed_pages. 
+@param[in] index index +@param[in] n_prefix look at first 'n_prefix' columns when +comparing records +@param[in] boundaries a vector that contains +n_diff_data->n_diff_on_level integers each of which represents the index (on +level 'level', counting from left/smallest to right/biggest from 0) of the +last record from each group of distinct keys +@param[in,out] n_diff_data n_diff_all_analyzed_pages and +n_external_pages_sum in this structure will be set by this function. The +members level, n_diff_on_level and n_leaf_pages_to_analyze must be set by the +caller in advance - they are used by some calculations inside this function +@param[in,out] mtr mini-transaction */ static void dict_stats_analyze_index_for_n_prefix( -/*==================================*/ - dict_index_t* index, /*!< in/out: index */ - ulint level, /*!< in: level, must be >= 1 */ - ib_uint64_t total_recs_on_level, - /*!< in: total number of - records on the given level */ - ulint n_prefix, /*!< in: look at first - n_prefix columns when - comparing records */ - ib_uint64_t n_diff_for_this_prefix, - /*!< in: number of distinct - records on the given level, - when looking at the first - n_prefix columns */ - boundaries_t* boundaries, /*!< in: array that contains - n_diff_for_this_prefix - integers each of which - represents the index (on the - level, counting from - left/smallest to right/biggest - from 0) of the last record - from each group of distinct - keys */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + dict_index_t* index, + ulint n_prefix, + const boundaries_t* boundaries, + n_diff_data_t* n_diff_data, + mtr_t* mtr) { btr_pcur_t pcur; const page_t* page; ib_uint64_t rec_idx; - ib_uint64_t last_idx_on_level; - ib_uint64_t n_recs_to_dive_below; - ib_uint64_t n_diff_sum_of_all_analyzed_pages; ib_uint64_t i; #if 0 DEBUG_PRINTF(" %s(table=%s, index=%s, level=%lu, n_prefix=%lu, " - "n_diff_for_this_prefix=" UINT64PF ")\n", + "n_diff_on_level=" UINT64PF ")\n", __func__, index->table->name, 
index->name, level, - n_prefix, n_diff_for_this_prefix); + n_prefix, n_diff_data->n_diff_on_level); #endif ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index), MTR_MEMO_S_LOCK)); - /* if some of those is 0 then this means that there is exactly one - page in the B-tree and it is empty and we should have done full scan - and should not be here */ - ut_ad(total_recs_on_level > 0); - ut_ad(n_diff_for_this_prefix > 0); - - /* this must be at least 1 */ - ut_ad(N_SAMPLE_PAGES(index) > 0); - /* Position pcur on the leftmost record on the leftmost page on the desired level. */ btr_pcur_open_at_index_side( true, index, BTR_SEARCH_LEAF | BTR_ALREADY_S_LATCHED, - &pcur, true, level, mtr); + &pcur, true, n_diff_data->level, mtr); btr_pcur_move_to_next_on_page(&pcur); page = btr_pcur_get_page(&pcur); + const rec_t* first_rec = btr_pcur_get_rec(&pcur); + + /* We shouldn't be scanning the leaf level. The caller of this function + should have stopped the descend on level 1 or higher. */ + ut_ad(n_diff_data->level > 0); + ut_ad(!page_is_leaf(page)); + /* The page must not be empty, except when it is the root page (and the whole index is empty). 
*/ - ut_ad(btr_pcur_is_on_user_rec(&pcur) || page_is_leaf(page)); - ut_ad(btr_pcur_get_rec(&pcur) - == page_rec_get_next_const(page_get_infimum_rec(page))); + ut_ad(btr_pcur_is_on_user_rec(&pcur)); + ut_ad(first_rec == page_rec_get_next_const(page_get_infimum_rec(page))); /* check that we are indeed on the desired level */ - ut_a(btr_page_get_level(page, mtr) == level); + ut_a(btr_page_get_level(page, mtr) == n_diff_data->level); /* there should not be any pages on the left */ ut_a(btr_page_get_prev(page, mtr) == FIL_NULL); /* check whether the first record on the leftmost page is marked - as such, if we are on a non-leaf level */ - ut_a((level == 0) - == !(REC_INFO_MIN_REC_FLAG & rec_get_info_bits( - btr_pcur_get_rec(&pcur), page_is_comp(page)))); + as such; we are on a non-leaf level */ + ut_a(rec_get_info_bits(first_rec, page_is_comp(page)) + & REC_INFO_MIN_REC_FLAG); - last_idx_on_level = boundaries->at( - static_cast<unsigned int>(n_diff_for_this_prefix - 1)); + const ib_uint64_t last_idx_on_level = boundaries->at( + static_cast<unsigned>(n_diff_data->n_diff_on_level - 1)); rec_idx = 0; - n_diff_sum_of_all_analyzed_pages = 0; - - n_recs_to_dive_below = ut_min(N_SAMPLE_PAGES(index), - n_diff_for_this_prefix); - - for (i = 0; i < n_recs_to_dive_below; i++) { - ib_uint64_t left; - ib_uint64_t right; - ib_uint64_t rnd; - ib_uint64_t dive_below_idx; + n_diff_data->n_diff_all_analyzed_pages = 0; + n_diff_data->n_external_pages_sum = 0; - /* there are n_diff_for_this_prefix elements + for (i = 0; i < n_diff_data->n_leaf_pages_to_analyze; i++) { + /* there are n_diff_on_level elements in 'boundaries' and we divide those elements - into n_recs_to_dive_below segments, for example: + into n_leaf_pages_to_analyze segments, for example: - let n_diff_for_this_prefix=100, n_recs_to_dive_below=4, then: + let n_diff_on_level=100, n_leaf_pages_to_analyze=4, then: segment i=0: [0, 24] segment i=1: [25, 49] segment i=2: [50, 74] segment i=3: [75, 99] or - let 
n_diff_for_this_prefix=1, n_recs_to_dive_below=1, then: + let n_diff_on_level=1, n_leaf_pages_to_analyze=1, then: segment i=0: [0, 0] or - let n_diff_for_this_prefix=2, n_recs_to_dive_below=2, then: + let n_diff_on_level=2, n_leaf_pages_to_analyze=2, then: segment i=0: [0, 0] segment i=1: [1, 1] or - let n_diff_for_this_prefix=13, n_recs_to_dive_below=7, then: + let n_diff_on_level=13, n_leaf_pages_to_analyze=7, then: segment i=0: [0, 0] segment i=1: [1, 2] segment i=2: [3, 4] @@ -1661,9 +1716,12 @@ dict_stats_analyze_index_for_n_prefix( then we select a random record from each segment and dive below it */ - left = n_diff_for_this_prefix * i / n_recs_to_dive_below; - right = n_diff_for_this_prefix * (i + 1) - / n_recs_to_dive_below - 1; + const ib_uint64_t n_diff = n_diff_data->n_diff_on_level; + const ib_uint64_t n_pick + = n_diff_data->n_leaf_pages_to_analyze; + + const ib_uint64_t left = n_diff * i / n_pick; + const ib_uint64_t right = n_diff * (i + 1) / n_pick - 1; ut_a(left <= right); ut_a(right <= last_idx_on_level); @@ -1671,11 +1729,11 @@ dict_stats_analyze_index_for_n_prefix( /* we do not pass (left, right) because we do not want to ask ut_rnd_interval() to work with too big numbers since ib_uint64_t could be bigger than ulint */ - rnd = static_cast<ib_uint64_t>( - ut_rnd_interval(0, static_cast<ulint>(right - left))); + const ulint rnd = ut_rnd_interval( + 0, static_cast<ulint>(right - left)); - dive_below_idx = boundaries->at( - static_cast<unsigned int>(left + rnd)); + const ib_uint64_t dive_below_idx + = boundaries->at(static_cast<unsigned>(left + rnd)); #if 0 DEBUG_PRINTF(" %s(): dive below record with index=" @@ -1711,9 +1769,13 @@ dict_stats_analyze_index_for_n_prefix( ut_a(rec_idx == dive_below_idx); ib_uint64_t n_diff_on_leaf_page; + ib_uint64_t n_external_pages; - n_diff_on_leaf_page = dict_stats_analyze_index_below_cur( - btr_pcur_get_btr_cur(&pcur), n_prefix, mtr); + dict_stats_analyze_index_below_cur(btr_pcur_get_btr_cur(&pcur), + n_prefix, + 
&n_diff_on_leaf_page, + &n_external_pages, + mtr); /* We adjust n_diff_on_leaf_page here to avoid counting one record twice - once as the last on some page and once @@ -1733,37 +1795,86 @@ dict_stats_analyze_index_for_n_prefix( n_diff_on_leaf_page--; } - n_diff_sum_of_all_analyzed_pages += n_diff_on_leaf_page; - } - - /* n_diff_sum_of_all_analyzed_pages can be 0 here if all the leaf - pages sampled contained only delete-marked records. In this case - we should assign 0 to index->stat_n_diff_key_vals[n_prefix - 1], which - the formula below does. */ + n_diff_data->n_diff_all_analyzed_pages += n_diff_on_leaf_page; - /* See REF01 for an explanation of the algorithm */ - index->stat_n_diff_key_vals[n_prefix - 1] - = index->stat_n_leaf_pages - - * n_diff_for_this_prefix - / total_recs_on_level - - * n_diff_sum_of_all_analyzed_pages - / n_recs_to_dive_below; + n_diff_data->n_external_pages_sum += n_external_pages; + } - index->stat_n_sample_sizes[n_prefix - 1] = n_recs_to_dive_below; + btr_pcur_close(&pcur); +} - DEBUG_PRINTF(" %s(): n_diff=" UINT64PF " for n_prefix=%lu " - "(%lu" - " * " UINT64PF " / " UINT64PF - " * " UINT64PF " / " UINT64PF ")\n", - __func__, index->stat_n_diff_key_vals[n_prefix - 1], - n_prefix, - index->stat_n_leaf_pages, - n_diff_for_this_prefix, total_recs_on_level, - n_diff_sum_of_all_analyzed_pages, n_recs_to_dive_below); +/** Set dict_index_t::stat_n_diff_key_vals[] and stat_n_sample_sizes[]. +@param[in] n_diff_data input data to use to derive the results +@param[in,out] index index whose stat_n_diff_key_vals[] to set */ +UNIV_INLINE +void +dict_stats_index_set_n_diff( + const n_diff_data_t* n_diff_data, + dict_index_t* index) +{ + for (ulint n_prefix = dict_index_get_n_unique(index); + n_prefix >= 1; + n_prefix--) { + /* n_diff_all_analyzed_pages can be 0 here if + all the leaf pages sampled contained only + delete-marked records. In this case we should assign + 0 to index->stat_n_diff_key_vals[n_prefix - 1], which + the formula below does. 
*/ + + const n_diff_data_t* data = &n_diff_data[n_prefix - 1]; + + ut_ad(data->n_leaf_pages_to_analyze > 0); + ut_ad(data->n_recs_on_level > 0); + + ulint n_ordinary_leaf_pages; + + if (data->level == 1) { + /* If we know the number of records on level 1, then + this number is the same as the number of pages on + level 0 (leaf). */ + n_ordinary_leaf_pages = data->n_recs_on_level; + } else { + /* If we analyzed D ordinary leaf pages and found E + external pages in total linked from those D ordinary + leaf pages, then this means that the ratio + ordinary/external is D/E. Then the ratio ordinary/total + is D / (D + E). Knowing that the total number of pages + is T (including ordinary and external) then we estimate + that the total number of ordinary leaf pages is + T * D / (D + E). */ + n_ordinary_leaf_pages + = index->stat_n_leaf_pages + * data->n_leaf_pages_to_analyze + / (data->n_leaf_pages_to_analyze + + data->n_external_pages_sum); + } - btr_pcur_close(&pcur); + /* See REF01 for an explanation of the algorithm */ + index->stat_n_diff_key_vals[n_prefix - 1] + = n_ordinary_leaf_pages + + * data->n_diff_on_level + / data->n_recs_on_level + + * data->n_diff_all_analyzed_pages + / data->n_leaf_pages_to_analyze; + + index->stat_n_sample_sizes[n_prefix - 1] + = data->n_leaf_pages_to_analyze; + + DEBUG_PRINTF(" %s(): n_diff=" UINT64PF " for n_prefix=%lu" + " (%lu" + " * " UINT64PF " / " UINT64PF + " * " UINT64PF " / " UINT64PF ")\n", + __func__, + index->stat_n_diff_key_vals[n_prefix - 1], + n_prefix, + index->stat_n_leaf_pages, + data->n_diff_on_level, + data->n_recs_on_level, + data->n_diff_all_analyzed_pages, + data->n_leaf_pages_to_analyze); + } } /*********************************************************************//** @@ -1781,10 +1892,8 @@ dict_stats_analyze_index( bool level_is_analyzed; ulint n_uniq; ulint n_prefix; - ib_uint64_t* n_diff_on_level; ib_uint64_t total_recs; ib_uint64_t total_pages; - boundaries_t* n_diff_boundaries; mtr_t mtr; ulint size; 
DBUG_ENTER("dict_stats_analyze_index"); @@ -1870,11 +1979,18 @@ dict_stats_analyze_index( DBUG_VOID_RETURN; } - /* set to zero */ - n_diff_on_level = reinterpret_cast<ib_uint64_t*> - (mem_zalloc(n_uniq * sizeof(ib_uint64_t))); + /* For each level that is being scanned in the btree, this contains the + number of different key values for all possible n-column prefixes. */ + ib_uint64_t* n_diff_on_level = new ib_uint64_t[n_uniq]; - n_diff_boundaries = new boundaries_t[n_uniq]; + /* For each level that is being scanned in the btree, this contains the + index of the last record from each group of equal records (when + comparing only the first n columns, n=1..n_uniq). */ + boundaries_t* n_diff_boundaries = new boundaries_t[n_uniq]; + + /* For each n-column prefix this array contains the input data that is + used to calculate dict_index_t::stat_n_diff_key_vals[]. */ + n_diff_data_t* n_diff_data = new n_diff_data_t[n_uniq]; /* total_recs is also used to estimate the number of pages on one level below, so at the start we have 1 page (the root) */ @@ -1986,12 +2102,12 @@ dict_stats_analyze_index( level_is_analyzed = true; - if (n_diff_on_level[n_prefix - 1] - >= N_DIFF_REQUIRED(index) - || level == 1) { - /* we found a good level with many distinct - records or we have reached the last level we - could scan */ + if (level == 1 + || n_diff_on_level[n_prefix - 1] + >= N_DIFF_REQUIRED(index)) { + /* we have reached the last level we could scan + or we found a good level with many distinct + records */ break; } @@ -2004,7 +2120,6 @@ found_level: " distinct records for n_prefix=%lu\n", __func__, level, n_diff_on_level[n_prefix - 1], n_prefix); - /* here we are either on level 1 or the level that we are on contains >= N_DIFF_REQUIRED distinct keys or we did not scan deeper levels because they would contain too many pages */ @@ -2013,20 +2128,47 @@ found_level: ut_ad(level_is_analyzed); + /* if any of these is 0 then there is exactly one page in the + B-tree and it is empty and we 
should have done full scan and + should not be here */ + ut_ad(total_recs > 0); + ut_ad(n_diff_on_level[n_prefix - 1] > 0); + + ut_ad(N_SAMPLE_PAGES(index) > 0); + + n_diff_data_t* data = &n_diff_data[n_prefix - 1]; + + data->level = level; + + data->n_recs_on_level = total_recs; + + data->n_diff_on_level = n_diff_on_level[n_prefix - 1]; + + data->n_leaf_pages_to_analyze = std::min( + N_SAMPLE_PAGES(index), + n_diff_on_level[n_prefix - 1]); + /* pick some records from this level and dive below them for the given n_prefix */ dict_stats_analyze_index_for_n_prefix( - index, level, total_recs, n_prefix, - n_diff_on_level[n_prefix - 1], - &n_diff_boundaries[n_prefix - 1], &mtr); + index, n_prefix, &n_diff_boundaries[n_prefix - 1], + data, &mtr); } mtr_commit(&mtr); delete[] n_diff_boundaries; - mem_free(n_diff_on_level); + delete[] n_diff_on_level; + + /* n_prefix == 0 means that the above loop did not end up prematurely + due to tree being changed and so n_diff_data[] is set up. */ + if (n_prefix == 0) { + dict_stats_index_set_n_diff(n_diff_data, index); + } + + delete[] n_diff_data; dict_stats_assert_initialized_index(index); DBUG_VOID_RETURN; @@ -2201,17 +2343,21 @@ dict_stats_save_index_stat( "END;", trx); if (ret != DB_SUCCESS) { - char buf_table[MAX_FULL_NAME_LEN]; - char buf_index[MAX_FULL_NAME_LEN]; - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Cannot save index statistics for table " - "%s, index %s, stat name \"%s\": %s\n", - ut_format_name(index->table->name, TRUE, - buf_table, sizeof(buf_table)), - ut_format_name(index->name, FALSE, - buf_index, sizeof(buf_index)), - stat_name, ut_strerr(ret)); + if (innodb_index_stats_not_found == false && + index->stats_error_printed == false) { + char buf_table[MAX_FULL_NAME_LEN]; + char buf_index[MAX_FULL_NAME_LEN]; + ut_print_timestamp(stderr); + fprintf(stderr, + " InnoDB: Cannot save index statistics for table " + "%s, index %s, stat name \"%s\": %s\n", + ut_format_name(index->table->name, TRUE, + 
buf_table, sizeof(buf_table)), + ut_format_name(index->name, FALSE, + buf_index, sizeof(buf_index)), + stat_name, ut_strerr(ret)); + index->stats_error_printed = true; + } } return(ret); @@ -2900,20 +3046,24 @@ dict_stats_update_for_index( } /* else */ - /* Fall back to transient stats since the persistent - storage is not present or is corrupted */ - char buf_table[MAX_FULL_NAME_LEN]; - char buf_index[MAX_FULL_NAME_LEN]; - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Recalculation of persistent statistics " - "requested for table %s index %s but the required " - "persistent statistics storage is not present or is " - "corrupted. Using transient stats instead.\n", - ut_format_name(index->table->name, TRUE, - buf_table, sizeof(buf_table)), - ut_format_name(index->name, FALSE, - buf_index, sizeof(buf_index))); + if (innodb_index_stats_not_found == false && + index->stats_error_printed == false) { + /* Fall back to transient stats since the persistent + storage is not present or is corrupted */ + char buf_table[MAX_FULL_NAME_LEN]; + char buf_index[MAX_FULL_NAME_LEN]; + ut_print_timestamp(stderr); + fprintf(stderr, + " InnoDB: Recalculation of persistent statistics " + "requested for table %s index %s but the required " + "persistent statistics storage is not present or is " + "corrupted. Using transient stats instead.\n", + ut_format_name(index->table->name, TRUE, + buf_table, sizeof(buf_table)), + ut_format_name(index->name, FALSE, + buf_index, sizeof(buf_index))); + index->stats_error_printed = false; + } } dict_table_stats_lock(index->table, RW_X_LATCH); @@ -2998,13 +3148,17 @@ dict_stats_update( /* Fall back to transient stats since the persistent storage is not present or is corrupted */ - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Recalculation of persistent statistics " - "requested for table %s but the required persistent " - "statistics storage is not present or is corrupted. 
" - "Using transient stats instead.\n", - ut_format_name(table->name, TRUE, buf, sizeof(buf))); + if (innodb_table_stats_not_found == false && + table->stats_error_printed == false) { + ut_print_timestamp(stderr); + fprintf(stderr, + " InnoDB: Recalculation of persistent statistics " + "requested for table %s but the required persistent " + "statistics storage is not present or is corrupted. " + "Using transient stats instead.\n", + ut_format_name(table->name, TRUE, buf, sizeof(buf))); + table->stats_error_printed = true; + } goto transient; @@ -3048,17 +3202,21 @@ dict_stats_update( /* persistent statistics storage does not exist or is corrupted, calculate the transient stats */ - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Error: Fetch of persistent " - "statistics requested for table %s but the " - "required system tables %s and %s are not " - "present or have unexpected structure. " - "Using transient stats instead.\n", - ut_format_name(table->name, TRUE, - buf, sizeof(buf)), - TABLE_STATS_NAME_PRINT, - INDEX_STATS_NAME_PRINT); + if (innodb_table_stats_not_found == false && + table->stats_error_printed == false) { + ut_print_timestamp(stderr); + fprintf(stderr, + " InnoDB: Error: Fetch of persistent " + "statistics requested for table %s but the " + "required system tables %s and %s are not " + "present or have unexpected structure. " + "Using transient stats instead.\n", + ut_format_name(table->name, TRUE, + buf, sizeof(buf)), + TABLE_STATS_NAME_PRINT, + INDEX_STATS_NAME_PRINT); + table->stats_error_printed = true; + } goto transient; } @@ -3128,16 +3286,19 @@ dict_stats_update( dict_stats_table_clone_free(t); - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Error fetching persistent statistics " - "for table %s from %s and %s: %s. 
" - "Using transient stats method instead.\n", - ut_format_name(table->name, TRUE, buf, - sizeof(buf)), - TABLE_STATS_NAME, - INDEX_STATS_NAME, - ut_strerr(err)); + if (innodb_table_stats_not_found == false && + table->stats_error_printed == false) { + ut_print_timestamp(stderr); + fprintf(stderr, + " InnoDB: Error fetching persistent statistics " + "for table %s from %s and %s: %s. " + "Using transient stats method instead.\n", + ut_format_name(table->name, TRUE, buf, + sizeof(buf)), + TABLE_STATS_NAME, + INDEX_STATS_NAME, + ut_strerr(err)); + } goto transient; } diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index 81fcba47812..f4e5721caa7 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -758,7 +758,7 @@ fil_node_open_file( fprintf(stderr, "InnoDB: Error: the size of single-table" " tablespace file %s\n" - "InnoDB: is only "UINT64PF"," + "InnoDB: is only " UINT64PF "," " should be at least %lu!\n", node->name, size_bytes, @@ -5725,7 +5725,7 @@ fil_io( ret = os_aio(type, mode | wake_later, node->name, node->handle, buf, offset, len, node, message); #endif /* UNIV_HOTBACKUP */ - ut_a(ret); + if (mode == OS_AIO_SYNC) { /* The i/o operation is already completed when we return from @@ -5740,7 +5740,10 @@ fil_io( ut_ad(fil_validate_skip()); } - return(DB_SUCCESS); + if (!ret) { + return(DB_OUT_OF_FILE_SPACE); + } else { + } return(DB_SUCCESS); } #ifndef UNIV_HOTBACKUP diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc index 4a667686795..f503cc487b7 100644 --- a/storage/innobase/fts/fts0fts.cc +++ b/storage/innobase/fts/fts0fts.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2011, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2011, 2014, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -44,6 +44,13 @@ Full Text Search interface /** Column name from the FTS config table */ #define FTS_MAX_CACHE_SIZE_IN_MB "cache_size_in_mb" +/** Verify if a aux table name is a obsolete table +by looking up the key word in the obsolete table names */ +#define FTS_IS_OBSOLETE_AUX_TABLE(table_name) \ + (strstr((table_name), "DOC_ID") != NULL \ + || strstr((table_name), "ADDED") != NULL \ + || strstr((table_name), "STOPWORDS") != NULL) + /** This is maximum FTS cache for each table and would be a configurable variable */ UNIV_INTERN ulong fts_max_cache_size; @@ -5837,6 +5844,12 @@ fts_is_aux_table_name( } } + /* Could be obsolete common tables. */ + if (strncmp(ptr, "ADDED", len) == 0 + || strncmp(ptr, "STOPWORDS", len) == 0) { + return(true); + } + /* Try and read the index id. */ if (!fts_read_object_id(&table->index_id, ptr)) { return(FALSE); @@ -6433,6 +6446,56 @@ fts_check_and_drop_orphaned_tables( mem_free(path); } + } else { + if (FTS_IS_OBSOLETE_AUX_TABLE(aux_table->name)) { + + /* Current table could be one of the three + obsolete tables, in this case, we should + always try to drop it but not rename it. + This could happen when we try to upgrade + from older server to later one, which doesn't + contain these obsolete tables. */ + drop = true; + + dberr_t err; + trx_t* trx_drop = + trx_allocate_for_background(); + + trx_drop->op_info = "Drop obsolete aux tables"; + trx_drop->dict_operation_lock_mode = RW_X_LATCH; + + trx_start_for_ddl(trx_drop, TRX_DICT_OP_TABLE); + + err = row_drop_table_for_mysql( + aux_table->name, trx_drop, false, true); + + trx_drop->dict_operation_lock_mode = 0; + + if (err != DB_SUCCESS) { + /* We don't need to worry about the + failure, since server would try to + drop it on next restart, even if + the table was broken. 
*/ + + ib_logf(IB_LOG_LEVEL_WARN, + "Fail to drop obsolete aux" + " table '%s', which is" + " harmless. will try to drop" + " it on next restart.", + aux_table->name); + + fts_sql_rollback(trx_drop); + } else { + ib_logf(IB_LOG_LEVEL_INFO, + "Dropped obsolete aux" + " table '%s'.", + aux_table->name); + + fts_sql_commit(trx_drop); + } + + trx_free_for_background(trx_drop); + } } #ifdef _WIN32 if (!drop && rename) { diff --git a/storage/innobase/fts/fts0opt.cc b/storage/innobase/fts/fts0opt.cc index a9f3a25530d..910a00cd521 100644 --- a/storage/innobase/fts/fts0opt.cc +++ b/storage/innobase/fts/fts0opt.cc @@ -95,7 +95,7 @@ enum fts_msg_type_t { /** Compressed list of words that have been read from FTS INDEX that needs to be optimized. */ struct fts_zip_t { - ulint status; /*!< Status of (un)/zip operation */ + lint status; /*!< Status of (un)/zip operation */ ulint n_words; /*!< Number of words compressed */ diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 79c994a78a0..a33d9a1d5bb 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -4,6 +4,7 @@ Copyright (c) 2000, 2014, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, 2009 Google Inc. Copyright (c) 2009, Percona Inc. Copyright (c) 2012, Facebook Inc. +Copyright (c) 2013, 2014 SkySQL Ab. All Rights Reserved. Portions of this file contain modifications contributed and copyrighted by Google, Inc. 
Those modifications are gratefully acknowledged and are described @@ -432,7 +433,7 @@ static PSI_rwlock_info all_innodb_rwlocks[] = { {&trx_purge_latch_key, "trx_purge_latch", 0}, {&index_tree_rw_lock_key, "index_tree_rw_lock", 0}, {&index_online_log_key, "index_online_log", 0}, - {&dict_table_stats_latch_key, "dict_table_stats", 0}, + {&dict_table_stats_key, "dict_table_stats", 0}, {&hash_table_rw_lock_key, "hash_table_locks", 0} }; # endif /* UNIV_PFS_RWLOCK */ @@ -3504,6 +3505,14 @@ innobase_end( if (innodb_inited) { + THD *thd= current_thd; + if (thd) { // may be UNINSTALL PLUGIN statement + trx_t* trx = thd_to_trx(thd); + if (trx) { + trx_free_for_mysql(trx); + } + } + srv_fast_shutdown = (ulint) innobase_fast_shutdown; innodb_inited = 0; @@ -4254,7 +4263,7 @@ innobase_close_connection( sql_print_warning( "MySQL is closing a connection that has an active " - "InnoDB transaction. "TRX_ID_FMT" row modifications " + "InnoDB transaction. " TRX_ID_FMT " row modifications " "will roll back.", trx->undo_no); } @@ -4317,16 +4326,23 @@ innobase_kill_query( #endif /* WITH_WSREP */ trx = thd_to_trx(thd); - if (trx) - { - /* Cancel a pending lock request. */ - lock_mutex_enter(); - trx_mutex_enter(trx); - if (trx->lock.wait_lock) - lock_cancel_waiting_and_release(trx->lock.wait_lock); - trx_mutex_exit(trx); - lock_mutex_exit(); - } + if (trx) { + THD *cur = current_thd; + THD *owner = trx->current_lock_mutex_owner; + + /* Cancel a pending lock request. */ + if (owner != cur) { + lock_mutex_enter(); + } + trx_mutex_enter(trx); + if (trx->lock.wait_lock) { + lock_cancel_waiting_and_release(trx->lock.wait_lock); + } + trx_mutex_exit(trx); + if (owner != cur) { + lock_mutex_exit(); + } + } DBUG_VOID_RETURN; } @@ -4373,14 +4389,11 @@ handler::Table_flags ha_innobase::table_flags() const /*============================*/ { - THD *thd = ha_thd(); /* Need to use tx_isolation here since table flags is (also) called before prebuilt is inited. 
*/ - ulong const tx_isolation = thd_tx_isolation(thd); + ulong const tx_isolation = thd_tx_isolation(ha_thd()); - if (tx_isolation <= ISO_READ_COMMITTED && - !(tx_isolation == ISO_READ_COMMITTED && - thd_rpl_is_parallel(thd))) { + if (tx_isolation <= ISO_READ_COMMITTED) { return(int_table_flags); } @@ -7871,7 +7884,7 @@ calc_row_difference( if (doc_id < prebuilt->table->fts->cache->next_doc_id) { fprintf(stderr, "InnoDB: FTS Doc ID must be larger than" - " "IB_ID_FMT" for table", + " " IB_ID_FMT " for table", innodb_table->fts->cache->next_doc_id - 1); ut_print_name(stderr, trx, @@ -7883,9 +7896,9 @@ calc_row_difference( - prebuilt->table->fts->cache->next_doc_id) >= FTS_DOC_ID_MAX_STEP) { fprintf(stderr, - "InnoDB: Doc ID "UINT64PF" is too" + "InnoDB: Doc ID " UINT64PF " is too" " big. Its difference with largest" - " Doc ID used "UINT64PF" cannot" + " Doc ID used " UINT64PF " cannot" " exceed or equal to %d\n", doc_id, prebuilt->table->fts->cache->next_doc_id - 1, @@ -8625,6 +8638,29 @@ ha_innobase::innobase_get_index( index = innobase_index_lookup(share, keynr); if (index) { + if (!key || ut_strcmp(index->name, key->name) != 0) { + fprintf(stderr, "InnoDB: [Error] Index for key no %u" + " mysql name %s , InnoDB name %s for table %s\n", + keynr, key ? key->name : "NULL", + index->name, + prebuilt->table->name); + + for(ulint i=0; i < table->s->keys; i++) { + index = innobase_index_lookup(share, i); + key = table->key_info + keynr; + + if (index) { + + fprintf(stderr, "InnoDB: [Note] Index for key no %u" + " mysql name %s , InnoDB name %s for table %s\n", + keynr, key ? 
key->name : "NULL", + index->name, + prebuilt->table->name); + } + } + + } + ut_a(ut_strcmp(index->name, key->name) == 0); } else { /* Can't find index with keynr in the translation @@ -12501,6 +12537,34 @@ ha_innobase::info_low( break; } + DBUG_EXECUTE_IF("ib_ha_innodb_stat_not_initialized", + index->table->stat_initialized = FALSE;); + + if (!ib_table->stat_initialized || + (index->table != ib_table || + !index->table->stat_initialized)) { + fprintf(stderr, + "InnoDB: Warning: Index %s points to table %s" " and ib_table %s statistics is initialized %d " + " but index table %s initialized %d " + " mysql table is %s. Have you mixed " + "up .frm files from different " + "installations? " + "See " REFMAN + "innodb-troubleshooting.html\n", + index->name, + index->table->name, + ib_table->name, + ib_table->stat_initialized, + index->table->name, + index->table->stat_initialized, + table->s->table_name.str + ); + + /* This is better than + assert on below function */ + dict_stats_init(index->table); + } + rec_per_key = innodb_rec_per_key( index, j, stats.records); @@ -18191,6 +18255,11 @@ static MYSQL_SYSVAR_ULONG(saved_page_number_debug, NULL, innodb_save_page_no, 0, 0, UINT_MAX32, 0); #endif /* UNIV_DEBUG */ +static MYSQL_SYSVAR_UINT(simulate_comp_failures, srv_simulate_comp_failures, + PLUGIN_VAR_NOCMDARG, + "Simulate compression failures.", + NULL, NULL, 0, 0, 99, 0); + static struct st_mysql_sys_var* innobase_system_variables[]= { MYSQL_SYSVAR(additional_mem_pool_size), MYSQL_SYSVAR(api_trx_level), @@ -18351,6 +18420,7 @@ static struct st_mysql_sys_var* innobase_system_variables[]= { MYSQL_SYSVAR(fil_make_page_dirty_debug), MYSQL_SYSVAR(saved_page_number_debug), #endif /* UNIV_DEBUG */ + MYSQL_SYSVAR(simulate_comp_failures), NULL }; @@ -18680,7 +18750,7 @@ ib_senderrf( va_start(args, code); - myf l; + myf l=0; switch(level) { case IB_LOG_LEVEL_INFO: diff --git a/storage/innobase/include/btr0cur.h b/storage/innobase/include/btr0cur.h index 833166e783c..f1e4406fcf7 
100644 --- a/storage/innobase/include/btr0cur.h +++ b/storage/innobase/include/btr0cur.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -576,6 +576,17 @@ void btr_estimate_number_of_different_key_vals( /*======================================*/ dict_index_t* index); /*!< in: index */ + +/** Gets the externally stored size of a record, in units of a database page. +@param[in] rec record +@param[in] offsets array returned by rec_get_offsets() +@return externally stored part, in units of a database page */ + +ulint +btr_rec_get_externally_stored_len( + const rec_t* rec, + const ulint* offsets); + /*******************************************************************//** Marks non-updated off-page fields as disowned by this record. The ownership must be transferred to the updated record which is inserted elsewhere in the diff --git a/storage/innobase/include/dict0dict.h b/storage/innobase/include/dict0dict.h index ce709a2e912..026187b2000 100644 --- a/storage/innobase/include/dict0dict.h +++ b/storage/innobase/include/dict0dict.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. 
This program is free software; you can redistribute it and/or modify it under @@ -43,6 +43,9 @@ Created 1/8/1996 Heikki Tuuri #include "trx0types.h" #include "row0types.h" +extern bool innodb_table_stats_not_found; +extern bool innodb_index_stats_not_found; + #ifndef UNIV_HOTBACKUP # include "sync0sync.h" # include "sync0rw.h" @@ -1435,6 +1438,28 @@ UNIV_INTERN void dict_mutex_exit_for_mysql(void); /*===========================*/ + +/** Create a dict_table_t's stats latch or delay for lazy creation. +This function is only called from either single threaded environment +or from a thread that has not shared the table object with other threads. +@param[in,out] table table whose stats latch to create +@param[in] enabled if false then the latch is disabled +and dict_table_stats_lock()/unlock() become noop on this table. */ + +void +dict_table_stats_latch_create( + dict_table_t* table, + bool enabled); + +/** Destroy a dict_table_t's stats latch. +This function is only called from either single threaded environment +or from a thread that has not shared the table object with other threads. +@param[in,out] table table whose stats latch to destroy */ + +void +dict_table_stats_latch_destroy( + dict_table_t* table); + /**********************************************************************//** Lock the appropriate latch to protect a given table's statistics. table->id is used to pick the corresponding latch from a global array of diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h index c5ed8d92cb0..0e3981a2946 100644 --- a/storage/innobase/include/dict0mem.h +++ b/storage/innobase/include/dict0mem.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. 
This program is free software; you can redistribute it and/or modify it under @@ -46,6 +46,7 @@ Created 1/8/1996 Heikki Tuuri #include "hash0hash.h" #include "trx0types.h" #include "fts0fts.h" +#include "os0once.h" /* Forward declaration. */ struct ib_rbt_t; @@ -627,6 +628,9 @@ struct dict_index_t{ ulint stat_n_leaf_pages; /*!< approximate number of leaf pages in the index tree */ + bool stats_error_printed; + /*!< has persistent statistics error printed + for this index ? */ /* @} */ rw_lock_t lock; /*!< read-write lock protecting the upper levels of the index tree */ @@ -842,6 +846,10 @@ struct dict_table_t{ initialized in dict_table_add_to_cache() */ /** Statistics for query optimization */ /* @{ */ + + volatile os_once::state_t stats_latch_created; + /*!< Creation state of 'stats_latch'. */ + rw_lock_t* stats_latch; /*!< this latch protects: dict_table_t::stat_initialized dict_table_t::stat_n_rows (*) @@ -950,6 +958,9 @@ struct dict_table_t{ /*!< see BG_STAT_* above. Writes are covered by dict_sys->mutex. Dirty reads are possible. */ + bool stats_error_printed; + /*!< Has persistent stats error beein + already printed for this table ? */ /* @} */ /*----------------------*/ /**!< The following fields are used by the diff --git a/storage/innobase/include/lock0lock.h b/storage/innobase/include/lock0lock.h index 385853bdb68..88246afebdc 100644 --- a/storage/innobase/include/lock0lock.h +++ b/storage/innobase/include/lock0lock.h @@ -289,7 +289,7 @@ lock_rec_insert_check_and_lock( inserted record maybe should inherit LOCK_GAP type locks from the successor record */ - __attribute__((nonnull, warn_unused_result)); + __attribute__((nonnull(2,3,4,6,7), warn_unused_result)); /*********************************************************************//** Checks if locks of other transactions prevent an immediate modify (update, delete mark, or delete unmark) of a clustered index record. 
If they do, diff --git a/storage/innobase/include/os0once.h b/storage/innobase/include/os0once.h new file mode 100644 index 00000000000..a8bbaf1d2d4 --- /dev/null +++ b/storage/innobase/include/os0once.h @@ -0,0 +1,125 @@ +/***************************************************************************** + +Copyright (c) 2014, Oracle and/or its affiliates. All Rights Reserved. + +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA + +*****************************************************************************/ + +/**************************************************//** +@file include/os0once.h +A class that aids executing a given function exactly once in a multi-threaded +environment. + +Created Feb 20, 2014 Vasil Dimov +*******************************************************/ + +#ifndef os0once_h +#define os0once_h + +#include "univ.i" + +#include "os0sync.h" +#include "ut0ut.h" + +/** Execute a given function exactly once in a multi-threaded environment +or wait for the function to be executed by another thread. + +Example usage: +First the user must create a control variable of type os_once::state_t and +assign it os_once::NEVER_DONE. +Then the user must pass this variable, together with a function to be +executed to os_once::do_or_wait_for_done(). + +Multiple threads can call os_once::do_or_wait_for_done() simultaneously with +the same (os_once::state_t) control variable. 
The provided function will be +called exactly once and when os_once::do_or_wait_for_done() returns then this +function has completed execution, by this or another thread. In other words +os_once::do_or_wait_for_done() will either execute the provided function or +will wait for its execution to complete if it is already called by another +thread or will do nothing if the function has already completed its execution +earlier. + +This mimics pthread_once(3), but unfortunatelly pthread_once(3) does not +support passing arguments to the init_routine() function. We should use +std::call_once() when we start compiling with C++11 enabled. */ +class os_once { +public: + /** Control variables' state type */ + typedef ib_uint32_t state_t; + + /** Not yet executed. */ + static const state_t NEVER_DONE = 0; + + /** Currently being executed by this or another thread. */ + static const state_t IN_PROGRESS = 1; + + /** Finished execution. */ + static const state_t DONE = 2; + +#ifdef HAVE_ATOMIC_BUILTINS + /** Call a given function or wait its execution to complete if it is + already called by another thread. + @param[in,out] state control variable + @param[in] do_func function to call + @param[in,out] do_func_arg an argument to pass to do_func(). */ + static + void + do_or_wait_for_done( + volatile state_t* state, + void (*do_func)(void*), + void* do_func_arg) + { + /* Avoid calling os_compare_and_swap_uint32() in the most + common case. */ + if (*state == DONE) { + return; + } + + if (os_compare_and_swap_uint32(state, + NEVER_DONE, IN_PROGRESS)) { + /* We are the first. Call the function. */ + + do_func(do_func_arg); + + const bool swapped = os_compare_and_swap_uint32( + state, IN_PROGRESS, DONE); + + ut_a(swapped); + } else { + /* The state is not NEVER_DONE, so either it is + IN_PROGRESS (somebody is calling the function right + now or DONE (it has already been called and completed). + Wait for it to become DONE. 
*/ + for (;;) { + const state_t s = *state; + + switch (s) { + case DONE: + return; + case IN_PROGRESS: + break; + case NEVER_DONE: + /* fall through */ + default: + ut_error; + } + + UT_RELAX_CPU(); + } + } + } +#endif /* HAVE_ATOMIC_BUILTINS */ +}; + +#endif /* os0once_h */ diff --git a/storage/innobase/include/os0sync.h b/storage/innobase/include/os0sync.h index 9b4ce2343c5..6d3dd850e08 100644 --- a/storage/innobase/include/os0sync.h +++ b/storage/innobase/include/os0sync.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. Portions of this file contain modifications contributed and copyrighted by @@ -434,6 +434,9 @@ Returns the old value of *ptr, atomically sets *ptr to new_val */ # define os_atomic_test_and_set_ulint(ptr, new_val) \ __sync_lock_test_and_set(ptr, new_val) +# define os_atomic_lock_release_byte(ptr) \ + __sync_lock_release(ptr) + #elif defined(HAVE_IB_SOLARIS_ATOMICS) # define HAVE_ATOMIC_BUILTINS @@ -515,6 +518,9 @@ Returns the old value of *ptr, atomically sets *ptr to new_val */ # define os_atomic_test_and_set_ulint(ptr, new_val) \ atomic_swap_ulong(ptr, new_val) +# define os_atomic_lock_release_byte(ptr) \ + (void) atomic_swap_uchar(ptr, 0) + #elif defined(HAVE_WINDOWS_ATOMICS) # define HAVE_ATOMIC_BUILTINS @@ -574,7 +580,8 @@ Returns true if swapped, ptr is pointer to target, old_val is value to compare to, new_val is the value to swap in. 
*/ # define os_compare_and_swap_uint32(ptr, old_val, new_val) \ - (win_cmp_and_xchg_dword(ptr, new_val, old_val) == old_val) + (InterlockedCompareExchange(reinterpret_cast<volatile long*>(ptr), \ + new_val, old_val) == old_val) # define os_compare_and_swap_ulint(ptr, old_val, new_val) \ (win_cmp_and_xchg_ulint(ptr, new_val, old_val) == old_val) @@ -637,6 +644,9 @@ clobbered */ # define os_atomic_test_and_set_ulong(ptr, new_val) \ InterlockedExchange(ptr, new_val) +# define os_atomic_lock_release_byte(ptr) \ + (void) InterlockedExchange(ptr, 0) + #else # define IB_ATOMICS_STARTUP_MSG \ "Mutexes and rw_locks use InnoDB's own implementation" diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h index 7922b14cc86..2b58e0717fb 100644 --- a/storage/innobase/include/srv0srv.h +++ b/storage/innobase/include/srv0srv.h @@ -3,6 +3,7 @@ Copyright (c) 1995, 2013, Oracle and/or its affiliates. All rights reserved. Copyright (c) 2008, 2009, Google Inc. Copyright (c) 2009, Percona Inc. +Copyright (c) 2013, 2014, SkySQL Ab. All Rights Reserved. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -453,6 +454,9 @@ extern struct export_var_t export_vars; /** Global counters */ extern srv_stats_t srv_stats; +/** Simulate compression failures. */ +extern uint srv_simulate_comp_failures; + # ifdef UNIV_PFS_THREAD /* Keys to register InnoDB threads with performance schema */ extern mysql_pfs_key_t buf_page_cleaner_thread_key; diff --git a/storage/innobase/include/sync0rw.h b/storage/innobase/include/sync0rw.h index 34cd8ef4bd6..b36e04f2810 100644 --- a/storage/innobase/include/sync0rw.h +++ b/storage/innobase/include/sync0rw.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2014, Oracle and/or its affiliates. 
All Rights Reserved. Copyright (c) 2008, Google Inc. Portions of this file contain modifications contributed and copyrighted by @@ -108,14 +108,8 @@ extern ib_mutex_t rw_lock_list_mutex; #ifdef UNIV_SYNC_DEBUG /* The global mutex which protects debug info lists of all rw-locks. To modify the debug info list of an rw-lock, this mutex has to be - acquired in addition to the mutex protecting the lock. */ -extern ib_mutex_t rw_lock_debug_mutex; -extern os_event_t rw_lock_debug_event; /*!< If deadlock detection does - not get immediately the mutex it - may wait for this event */ -extern ibool rw_lock_debug_waiters; /*!< This is set to TRUE, if - there may be waiters for the event */ +extern os_fast_mutex_t rw_lock_debug_mutex; #endif /* UNIV_SYNC_DEBUG */ /** Counters for RW locks. */ @@ -141,7 +135,7 @@ extern mysql_pfs_key_t trx_i_s_cache_lock_key; extern mysql_pfs_key_t trx_purge_latch_key; extern mysql_pfs_key_t index_tree_rw_lock_key; extern mysql_pfs_key_t index_online_log_key; -extern mysql_pfs_key_t dict_table_stats_latch_key; +extern mysql_pfs_key_t dict_table_stats_key; extern mysql_pfs_key_t trx_sys_rw_lock_key; extern mysql_pfs_key_t hash_table_rw_lock_key; #endif /* UNIV_PFS_RWLOCK */ diff --git a/storage/innobase/include/sync0sync.ic b/storage/innobase/include/sync0sync.ic index f34f3f90b63..cb6f6efbed8 100644 --- a/storage/innobase/include/sync0sync.ic +++ b/storage/innobase/include/sync0sync.ic @@ -108,10 +108,7 @@ mutex_reset_lock_word( ib_mutex_t* mutex) /*!< in: mutex */ { #if defined(HAVE_ATOMIC_BUILTINS) - /* In theory __sync_lock_release should be used to release the lock. - Unfortunately, it does not work properly alone. The workaround is - that more conservative __sync_lock_test_and_set is used instead. 
*/ - os_atomic_test_and_set_byte(&mutex->lock_word, 0); + os_atomic_lock_release_byte(&mutex->lock_word); #else mutex->lock_word = 0; diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h index a30bbdbebb2..7c92445b796 100644 --- a/storage/innobase/include/trx0trx.h +++ b/storage/innobase/include/trx0trx.h @@ -992,6 +992,11 @@ struct trx_t{ count of tables being flushed. */ /*------------------------------*/ + THD* current_lock_mutex_owner; + /*!< If this is equal to current_thd, + then in innobase_kill_query() we know we + already hold the lock_sys->mutex. */ + /*------------------------------*/ #ifdef UNIV_DEBUG ulint start_line; /*!< Track where it was started from */ const char* start_file; /*!< Filename where it was started */ diff --git a/storage/innobase/include/univ.i b/storage/innobase/include/univ.i index 98c5512bd0b..bc359746a0b 100644 --- a/storage/innobase/include/univ.i +++ b/storage/innobase/include/univ.i @@ -44,7 +44,7 @@ Created 1/20/1994 Heikki Tuuri #define INNODB_VERSION_MAJOR 5 #define INNODB_VERSION_MINOR 6 -#define INNODB_VERSION_BUGFIX 17 +#define INNODB_VERSION_BUGFIX 19 /* The following is the InnoDB version as shown in SELECT plugin_version FROM information_schema.plugins; @@ -439,10 +439,10 @@ typedef unsigned __int64 ib_uint64_t; typedef unsigned __int32 ib_uint32_t; #else /* Use the integer types and formatting strings defined in the C99 standard. 
*/ -# define UINT32PF "%"PRIu32 -# define INT64PF "%"PRId64 -# define UINT64PF "%"PRIu64 -# define UINT64PFx "%016"PRIx64 +# define UINT32PF "%" PRIu32 +# define INT64PF "%" PRId64 +# define UINT64PF "%" PRIu64 +# define UINT64PFx "%016" PRIx64 # define DBUG_LSN_PF UINT64PF typedef int64_t ib_int64_t; typedef uint64_t ib_uint64_t; diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index f99c34294cd..659b2e5b62a 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -49,6 +49,7 @@ Created 5/7/1996 Heikki Tuuri #include "btr0btr.h" #include "dict0boot.h" #include <set> +#include "mysql/plugin.h" #ifdef WITH_WSREP extern my_bool wsrep_debug; @@ -378,6 +379,11 @@ struct lock_stack_t { ulint heap_no; /*!< heap number if rec lock */ }; +extern "C" void thd_report_wait_for(const MYSQL_THD thd, MYSQL_THD other_thd); +extern "C" int thd_need_wait_for(const MYSQL_THD thd); +extern "C" +int thd_need_ordering_with(const MYSQL_THD thd, const MYSQL_THD other_thd); + /** Stack to use during DFS search. Currently only a single stack is required because there is no parallel deadlock check. This stack is protected by the lock_sys_t::mutex. */ @@ -393,6 +399,14 @@ UNIV_INTERN mysql_pfs_key_t lock_sys_mutex_key; UNIV_INTERN mysql_pfs_key_t lock_sys_wait_mutex_key; #endif /* UNIV_PFS_MUTEX */ +/* Buffer to collect THDs to report waits for. 
*/ +struct thd_wait_reports { + struct thd_wait_reports *next; /*!< List link */ + ulint used; /*!< How many elements in waitees[] */ + trx_t *waitees[64]; /*!< Trxs for thd_report_wait_for() */ +}; + + #ifdef UNIV_DEBUG UNIV_INTERN ibool lock_print_waits = FALSE; @@ -1023,6 +1037,32 @@ lock_rec_has_to_wait( return(FALSE); } + if ((type_mode & LOCK_GAP || lock_rec_get_gap(lock2)) && + !thd_need_ordering_with(trx->mysql_thd, + lock2->trx->mysql_thd)) { + /* If the upper server layer has already decided on the + commit order between the transaction requesting the + lock and the transaction owning the lock, we do not + need to wait for gap locks. Such ordeering by the upper + server layer happens in parallel replication, where the + commit order is fixed to match the original order on the + master. + + Such gap locks are mainly needed to get serialisability + between transactions so that they will be binlogged in + the correct order so that statement-based replication + will give the correct results. Since the right order + was already determined on the master, we do not need + to enforce it again here. + + Skipping the locks is not essential for correctness, + since in case of deadlock we will just kill the later + transaction and retry it. But it can save some + unnecessary rollbacks and retries. */ + + return (FALSE); + } + #ifdef WITH_WSREP /* if BF thread is locking and has conflict with another BF thread, we need to look at trx ordering and lock types */ @@ -4069,7 +4109,8 @@ static trx_id_t lock_deadlock_search( /*=================*/ - lock_deadlock_ctx_t* ctx) /*!< in/out: deadlock context */ + lock_deadlock_ctx_t* ctx, /*!< in/out: deadlock context */ + struct thd_wait_reports*waitee_ptr) /*!< in/out: list of waitees */ { const lock_t* lock; ulint heap_no; @@ -4149,38 +4190,59 @@ lock_deadlock_search( /* Select the joining transaction as the victim. 
*/ return(ctx->start->id); - } else if (lock->trx->lock.que_state == TRX_QUE_LOCK_WAIT) { + } else { + /* We do not need to report autoinc locks to the upper + layer. These locks are released before commit, so they + can not cause deadlocks with binlog-fixed commit + order. */ + if (waitee_ptr && + (lock_get_type_low(lock) != LOCK_TABLE || + lock_get_mode(lock) != LOCK_AUTO_INC)) { + if (waitee_ptr->used == + sizeof(waitee_ptr->waitees) / + sizeof(waitee_ptr->waitees[0])) { + waitee_ptr->next = + (struct thd_wait_reports *) + mem_alloc(sizeof(*waitee_ptr)); + waitee_ptr = waitee_ptr->next; + if (!waitee_ptr) { + ctx->too_deep = TRUE; + return(ctx->start->id); + } + waitee_ptr->next = NULL; + waitee_ptr->used = 0; + } + waitee_ptr->waitees[waitee_ptr->used++] = lock->trx; + } - /* Another trx ahead has requested a lock in an - incompatible mode, and is itself waiting for a lock. */ + if (lock->trx->lock.que_state == TRX_QUE_LOCK_WAIT) { - ++ctx->cost; + /* Another trx ahead has requested a lock in an + incompatible mode, and is itself waiting for a lock. */ - /* Save current search state. */ - if (!lock_deadlock_push(ctx, lock, heap_no)) { + ++ctx->cost; - /* Unable to save current search state, stack - size not big enough. */ + /* Save current search state. */ + if (!lock_deadlock_push(ctx, lock, heap_no)) { - ctx->too_deep = TRUE; + /* Unable to save current search state, stack + size not big enough. 
*/ + + ctx->too_deep = TRUE; -#ifdef WITH_WSREP - if (wsrep_thd_is_BF(ctx->start->mysql_thd, TRUE)) - return(lock->trx->id); - else -#endif /* WITH_WSREP */ return(ctx->start->id); - } + } - ctx->wait_lock = lock->trx->lock.wait_lock; - lock = lock_get_first_lock(ctx, &heap_no); + ctx->wait_lock = lock->trx->lock.wait_lock; + lock = lock_get_first_lock(ctx, &heap_no); - if (lock->trx->lock.deadlock_mark > ctx->mark_start) { + if (lock->trx->lock.deadlock_mark > ctx->mark_start) { + lock = lock_get_next_lock(ctx, lock, heap_no); + } + + } else { lock = lock_get_next_lock(ctx, lock, heap_no); } - - } else { - lock = lock_get_next_lock(ctx, lock, heap_no); } } @@ -4245,6 +4307,48 @@ lock_deadlock_trx_rollback( trx_mutex_exit(trx); } +static +void +lock_report_waiters_to_mysql( +/*=======================*/ + struct thd_wait_reports* waitee_buf_ptr, /*!< in: set of trxs */ + THD* mysql_thd, /*!< in: THD */ + trx_id_t victim_trx_id) /*!< in: Trx selected + as deadlock victim, if + any */ +{ + struct thd_wait_reports* p; + struct thd_wait_reports* q; + ulint i; + + p = waitee_buf_ptr; + while (p) { + i = 0; + while (i < p->used) { + trx_t *w_trx = p->waitees[i]; + /* There is no need to report waits to a trx already + selected as a victim. */ + if (w_trx->id != victim_trx_id) { + /* If thd_report_wait_for() decides to kill the + transaction, then we will get a call back into + innobase_kill_query. We mark this by setting + current_lock_mutex_owner, so we can avoid trying + to recursively take lock_sys->mutex. */ + w_trx->current_lock_mutex_owner = mysql_thd; + thd_report_wait_for(mysql_thd, w_trx->mysql_thd); + w_trx->current_lock_mutex_owner = NULL; + } + ++i; + } + q = p->next; + if (p != waitee_buf_ptr) { + mem_free(p); + } + p = q; + } +} + + /********************************************************************//** Checks if a joining lock request results in a deadlock. 
If a deadlock is found this function will resolve the dadlock by choosing a victim transaction @@ -4260,13 +4364,23 @@ lock_deadlock_check_and_resolve( const lock_t* lock, /*!< in: lock the transaction is requesting */ const trx_t* trx) /*!< in: transaction */ { - trx_id_t victim_trx_id; + trx_id_t victim_trx_id; + struct thd_wait_reports waitee_buf; + struct thd_wait_reports*waitee_buf_ptr; + THD* start_mysql_thd; ut_ad(trx != NULL); ut_ad(lock != NULL); ut_ad(lock_mutex_own()); assert_trx_in_list(trx); + start_mysql_thd = trx->mysql_thd; + if (start_mysql_thd && thd_need_wait_for(start_mysql_thd)) { + waitee_buf_ptr = &waitee_buf; + } else { + waitee_buf_ptr = NULL; + } + /* Try and resolve as many deadlocks as possible. */ do { lock_deadlock_ctx_t ctx; @@ -4279,7 +4393,19 @@ lock_deadlock_check_and_resolve( ctx.wait_lock = lock; ctx.mark_start = lock_mark_counter; - victim_trx_id = lock_deadlock_search(&ctx); + if (waitee_buf_ptr) { + waitee_buf_ptr->next = NULL; + waitee_buf_ptr->used = 0; + } + + victim_trx_id = lock_deadlock_search(&ctx, waitee_buf_ptr); + + /* Report waits to upper layer, as needed. */ + if (waitee_buf_ptr) { + lock_report_waiters_to_mysql(waitee_buf_ptr, + start_mysql_thd, + victim_trx_id); + } /* Search too deep, we rollback the joining transaction. */ if (ctx.too_deep) { diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc index 992b1e79b58..1ec08da8a83 100644 --- a/storage/innobase/os/os0file.cc +++ b/storage/innobase/os/os0file.cc @@ -2679,7 +2679,7 @@ try_again: } ib_logf(IB_LOG_LEVEL_ERROR, - "Tried to read "ULINTPF" bytes at offset " UINT64PF". " + "Tried to read " ULINTPF " bytes at offset " UINT64PF ". 
" "Was only able to read %ld.", n, offset, (lint) ret); #endif /* __WIN__ */ #ifdef __WIN__ @@ -2866,6 +2866,7 @@ os_file_write_func( DWORD high; ulint n_retries = 0; ulint err; + DWORD saved_error = 0; #ifndef UNIV_HOTBACKUP ulint i; #endif /* !UNIV_HOTBACKUP */ @@ -2955,8 +2956,10 @@ retry: } if (!os_has_said_disk_full) { + char *winmsg = NULL; - err = (ulint) GetLastError(); + saved_error = GetLastError(); + err = (ulint) saved_error; ut_print_timestamp(stderr); @@ -2973,6 +2976,23 @@ retry: name, offset, (ulong) n, (ulong) len, (ulong) err); + /* Ask Windows to prepare a standard message for a + GetLastError() */ + + FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | + FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, saved_error, + MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), + (LPSTR)&winmsg, 0, NULL); + + if (winmsg) { + fprintf(stderr, + "InnoDB: FormatMessage: Error number %lu means '%s'.\n", + (ulong) saved_error, winmsg); + LocalFree(winmsg); + } + if (strerror((int) err) != NULL) { fprintf(stderr, "InnoDB: Error number %lu means '%s'.\n", @@ -3001,12 +3021,11 @@ retry: } if (!os_has_said_disk_full) { - ut_print_timestamp(stderr); fprintf(stderr, " InnoDB: Error: Write to file %s failed" - " at offset "UINT64PF".\n" + " at offset " UINT64PF ".\n" "InnoDB: %lu bytes should have been written," " only %ld were written.\n" "InnoDB: Operating system error number %lu.\n" @@ -4592,11 +4611,16 @@ os_aio_func( wake_later = mode & OS_AIO_SIMULATED_WAKE_LATER; mode = mode & (~OS_AIO_SIMULATED_WAKE_LATER); + DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28", + mode = OS_AIO_SYNC;); + if (mode == OS_AIO_SYNC #ifdef WIN_ASYNC_IO && !srv_use_native_aio #endif /* WIN_ASYNC_IO */ ) { + ibool ret; + /* This is actually an ordinary synchronous read or write: no need to use an i/o-handler thread. 
NOTE that if we use Windows async i/o, Windows does not allow us to use @@ -4611,13 +4635,23 @@ os_aio_func( and os_file_write_func() */ if (type == OS_FILE_READ) { - return(os_file_read_func(file, buf, offset, n)); + ret = os_file_read_func(file, buf, offset, n); + } else { + + ut_ad(!srv_read_only_mode); + ut_a(type == OS_FILE_WRITE); + + ret = os_file_write_func(name, file, buf, offset, n); } - ut_ad(!srv_read_only_mode); - ut_a(type == OS_FILE_WRITE); + DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28", + os_has_said_disk_full = FALSE;); + DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28", + ret = 0;); + DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28", + errno = 28;); - return(os_file_write_func(name, file, buf, offset, n)); + return ret; } try_again: @@ -5442,7 +5476,13 @@ consecutive_loop: aio_slot->offset, total_len); } - ut_a(ret); + DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28_2", + os_has_said_disk_full = FALSE;); + DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28_2", + ret = 0;); + DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28_2", + errno = 28;); + srv_set_io_thread_op_info(global_segment, "file i/o done"); if (aio_slot->type == OS_FILE_READ && n_consecutive > 1) { diff --git a/storage/innobase/page/page0zip.cc b/storage/innobase/page/page0zip.cc index ab7a19795a3..4b19a35925e 100644 --- a/storage/innobase/page/page0zip.cc +++ b/storage/innobase/page/page0zip.cc @@ -1309,6 +1309,30 @@ page_zip_compress( MONITOR_INC(MONITOR_PAGE_COMPRESS); + /* Simulate a compression failure with a probability determined by + innodb_simulate_comp_failures, only if the page has 2 or more + records. 
*/ + + if (srv_simulate_comp_failures + && !dict_index_is_ibuf(index) + && page_get_n_recs(page) >= 2 + && ((ulint)(rand() % 100) < srv_simulate_comp_failures) + && strcasecmp(index->table_name, "IBUF_DUMMY") != 0) { + +#ifdef UNIV_DEBUG + fprintf(stderr, + "InnoDB: Simulating a compression failure" + " for table %s, index %s, page %lu (%s)\n", + index->table_name, + index->name, + page_get_page_no(page), + page_is_leaf(page) ? "leaf" : "non-leaf"); + +#endif + + goto err_exit; + } + heap = mem_heap_create(page_zip_get_size(page_zip) + n_fields * (2 + sizeof(ulint)) + REC_OFFS_HEADER_SIZE diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc index e6487730a77..c144ca890f8 100644 --- a/storage/innobase/row/row0ins.cc +++ b/storage/innobase/row/row0ins.cc @@ -151,35 +151,37 @@ row_ins_alloc_sys_fields( ut_ad(row && table && heap); ut_ad(dtuple_get_n_fields(row) == dict_table_get_n_cols(table)); - /* 1. Allocate buffer for row id */ + /* allocate buffer to hold the needed system created hidden columns. */ + uint len = DATA_ROW_ID_LEN + DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN; + ptr = static_cast<byte*>(mem_heap_zalloc(heap, len)); + /* 1. Populate row-id */ col = dict_table_get_sys_col(table, DATA_ROW_ID); dfield = dtuple_get_nth_field(row, dict_col_get_no(col)); - ptr = static_cast<byte*>(mem_heap_zalloc(heap, DATA_ROW_ID_LEN)); - dfield_set_data(dfield, ptr, DATA_ROW_ID_LEN); node->row_id_buf = ptr; - /* 3. Allocate buffer for trx id */ + ptr += DATA_ROW_ID_LEN; + /* 2. Populate trx id */ col = dict_table_get_sys_col(table, DATA_TRX_ID); dfield = dtuple_get_nth_field(row, dict_col_get_no(col)); - ptr = static_cast<byte*>(mem_heap_zalloc(heap, DATA_TRX_ID_LEN)); dfield_set_data(dfield, ptr, DATA_TRX_ID_LEN); node->trx_id_buf = ptr; - /* 4. Allocate buffer for roll ptr */ + ptr += DATA_TRX_ID_LEN; + + /* 3. 
Populate roll ptr */ col = dict_table_get_sys_col(table, DATA_ROLL_PTR); dfield = dtuple_get_nth_field(row, dict_col_get_no(col)); - ptr = static_cast<byte*>(mem_heap_zalloc(heap, DATA_ROLL_PTR_LEN)); dfield_set_data(dfield, ptr, DATA_ROLL_PTR_LEN); } diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc index 56cf9f1943c..86b47c9f3bd 100644 --- a/storage/innobase/row/row0merge.cc +++ b/storage/innobase/row/row0merge.cc @@ -786,7 +786,7 @@ row_merge_read( if (UNIV_UNLIKELY(!success)) { ut_print_timestamp(stderr); fprintf(stderr, - " InnoDB: failed to read merge block at "UINT64PF"\n", + " InnoDB: failed to read merge block at " UINT64PF "\n", ofs); } diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc index 93d13ea49ee..dd7af8a3526 100644 --- a/storage/innobase/row/row0mysql.cc +++ b/storage/innobase/row/row0mysql.cc @@ -1359,7 +1359,7 @@ error_exit: if (doc_id < next_doc_id) { fprintf(stderr, "InnoDB: FTS Doc ID must be large than" - " "UINT64PF" for table", + " " UINT64PF " for table", next_doc_id - 1); ut_print_name(stderr, trx, TRUE, table->name); putc('\n', stderr); @@ -1374,9 +1374,9 @@ error_exit: if (doc_id - next_doc_id >= FTS_DOC_ID_MAX_STEP) { fprintf(stderr, - "InnoDB: Doc ID "UINT64PF" is too" + "InnoDB: Doc ID " UINT64PF " is too" " big. Its difference with largest" - " used Doc ID "UINT64PF" cannot" + " used Doc ID " UINT64PF " cannot" " exceed or equal to %d\n", doc_id, next_doc_id - 1, FTS_DOC_ID_MAX_STEP); diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc index 359ae3f2c21..e5a7694cb93 100644 --- a/storage/innobase/row/row0sel.cc +++ b/storage/innobase/row/row0sel.cc @@ -877,16 +877,15 @@ row_sel_get_clust_rec( if (!node->read_view) { /* Try to place a lock on the index record */ - - /* If innodb_locks_unsafe_for_binlog option is used - or this session is using READ COMMITTED isolation level - we lock only the record, i.e., next-key locking is - not used. 
*/ ulint lock_type; trx_t* trx; trx = thr_get_trx(thr); + /* If innodb_locks_unsafe_for_binlog option is used + or this session is using READ COMMITTED or lower isolation level + we lock only the record, i.e., next-key locking is + not used. */ if (srv_locks_unsafe_for_binlog || trx->isolation_level <= TRX_ISO_READ_COMMITTED) { lock_type = LOCK_REC_NOT_GAP; @@ -1502,12 +1501,6 @@ rec_loop: search result set, resulting in the phantom problem. */ if (!consistent_read) { - - /* If innodb_locks_unsafe_for_binlog option is used - or this session is using READ COMMITTED isolation - level, we lock only the record, i.e., next-key - locking is not used. */ - rec_t* next_rec = page_rec_get_next(rec); ulint lock_type; trx_t* trx; @@ -1517,6 +1510,10 @@ rec_loop: offsets = rec_get_offsets(next_rec, index, offsets, ULINT_UNDEFINED, &heap); + /* If innodb_locks_unsafe_for_binlog option is used + or this session is using READ COMMITTED or lower isolation + level, we lock only the record, i.e., next-key + locking is not used. */ if (srv_locks_unsafe_for_binlog || trx->isolation_level <= TRX_ISO_READ_COMMITTED) { @@ -1565,12 +1562,6 @@ skip_lock: if (!consistent_read) { /* Try to place a lock on the index record */ - - /* If innodb_locks_unsafe_for_binlog option is used - or this session is using READ COMMITTED isolation level, - we lock only the record, i.e., next-key locking is - not used. */ - ulint lock_type; trx_t* trx; @@ -1579,6 +1570,10 @@ skip_lock: trx = thr_get_trx(thr); + /* If innodb_locks_unsafe_for_binlog option is used + or this session is using READ COMMITTED or lower isolation level, + we lock only the record, i.e., next-key locking is + not used. 
*/ if (srv_locks_unsafe_for_binlog || trx->isolation_level <= TRX_ISO_READ_COMMITTED) { @@ -4227,7 +4222,7 @@ rec_loop: /* Try to place a lock on the index record */ /* If innodb_locks_unsafe_for_binlog option is used - or this session is using a READ COMMITTED isolation + or this session is using a READ COMMITTED or lower isolation level we do not lock gaps. Supremum record is really a gap and therefore we do not set locks there. */ @@ -4369,7 +4364,7 @@ wrong_offs: /* Try to place a gap lock on the index record only if innodb_locks_unsafe_for_binlog option is not set or this session is not - using a READ COMMITTED isolation level. */ + using a READ COMMITTED or lower isolation level. */ err = sel_set_rec_lock( btr_pcur_get_block(pcur), @@ -4418,7 +4413,7 @@ wrong_offs: /* Try to place a gap lock on the index record only if innodb_locks_unsafe_for_binlog option is not set or this session is not - using a READ COMMITTED isolation level. */ + using a READ COMMITTED or lower isolation level. 
*/ err = sel_set_rec_lock( btr_pcur_get_block(pcur), diff --git a/storage/innobase/srv/srv0mon.cc b/storage/innobase/srv/srv0mon.cc index ea346566e57..64417b1e5fb 100644 --- a/storage/innobase/srv/srv0mon.cc +++ b/storage/innobase/srv/srv0mon.cc @@ -41,8 +41,8 @@ Created 12/9/2009 Jimmy Yang /* Macro to standardize the counter names for counters in the "monitor_buf_page" module as they have very structured defines */ #define MONITOR_BUF_PAGE(name, description, code, op, op_code) \ - {"buffer_page_"op"_"name, "buffer_page_io", \ - "Number of "description" Pages "op, \ + {"buffer_page_" op "_" name, "buffer_page_io", \ + "Number of " description " Pages " op, \ MONITOR_GROUP_MODULE, MONITOR_DEFAULT_START, \ MONITOR_##code##_##op_code} diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc index 6a410285f2b..6e03f715f28 100644 --- a/storage/innobase/srv/srv0srv.cc +++ b/storage/innobase/srv/srv0srv.cc @@ -3,6 +3,7 @@ Copyright (c) 1995, 2013, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, 2009 Google Inc. Copyright (c) 2009, Percona Inc. +Copyright (c) 2013, 2014, SkySQL Ab. All Rights Reserved. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -473,6 +474,9 @@ current_time % 5 != 0. */ #endif /* MEM_PERIODIC_CHECK */ # define SRV_MASTER_DICT_LRU_INTERVAL (47) +/** Simulate compression failures. */ +UNIV_INTERN uint srv_simulate_comp_failures = 0; + /** Acquire the system_mutex. 
*/ #define srv_sys_mutex_enter() do { \ mutex_enter(&srv_sys->mutex); \ diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc index 0c04fba421a..1c2bfcbd920 100644 --- a/storage/innobase/srv/srv0start.cc +++ b/storage/innobase/srv/srv0start.cc @@ -2197,9 +2197,9 @@ innobase_start_or_create_for_mysql(void) } else if (size != srv_log_file_size) { ib_logf(IB_LOG_LEVEL_ERROR, "Log file %s is" - " of different size "UINT64PF" bytes" + " of different size " UINT64PF " bytes" " than other log" - " files "UINT64PF" bytes!", + " files " UINT64PF " bytes!", logfilename, size << UNIV_PAGE_SIZE_SHIFT, (os_offset_t) srv_log_file_size diff --git a/storage/innobase/sync/sync0arr.cc b/storage/innobase/sync/sync0arr.cc index 2cfb693f8ba..986010039f9 100644 --- a/storage/innobase/sync/sync0arr.cc +++ b/storage/innobase/sync/sync0arr.cc @@ -182,6 +182,33 @@ sync_array_get_nth_cell( } /******************************************************************//** +Looks for a cell with the given thread id. +@return pointer to cell or NULL if not found */ +static +sync_cell_t* +sync_array_find_thread( +/*===================*/ + sync_array_t* arr, /*!< in: wait array */ + os_thread_id_t thread) /*!< in: thread id */ +{ + ulint i; + sync_cell_t* cell; + + for (i = 0; i < arr->n_cells; i++) { + + cell = sync_array_get_nth_cell(arr, i); + + if (cell->wait_object != NULL + && os_thread_eq(cell->thread, thread)) { + + return(cell); /* Found */ + } + } + + return(NULL); /* Not found */ +} + +/******************************************************************//** Reserves the mutex semaphore protecting a sync array. 
*/ static void @@ -432,8 +459,10 @@ static void sync_array_cell_print( /*==================*/ - FILE* file, /*!< in: file where to print */ - sync_cell_t* cell) /*!< in: sync cell */ + FILE* file, /*!< in: file where to print */ + sync_cell_t* cell, /*!< in: sync cell */ + os_thread_id_t* reserver) /*!< out: write reserver or + 0 */ { ib_mutex_t* mutex; rw_lock_t* rwlock; @@ -454,19 +483,21 @@ sync_array_cell_print( been freed meanwhile */ mutex = cell->old_wait_mutex; - fprintf(file, - "Mutex at %p created file %s line %lu, lock var %lu\n" + if (mutex) { + fprintf(file, + "Mutex at %p created file %s line %lu, lock var %lu\n" #ifdef UNIV_SYNC_DEBUG - "Last time reserved in file %s line %lu, " + "Last time reserved in file %s line %lu, " #endif /* UNIV_SYNC_DEBUG */ - "waiters flag %lu\n", - (void*) mutex, innobase_basename(mutex->cfile_name), - (ulong) mutex->cline, - (ulong) mutex->lock_word, + "waiters flag %lu\n", + (void*) mutex, innobase_basename(mutex->cfile_name), + (ulong) mutex->cline, + (ulong) mutex->lock_word, #ifdef UNIV_SYNC_DEBUG - mutex->file_name, (ulong) mutex->line, + mutex->file_name, (ulong) mutex->line, #endif /* UNIV_SYNC_DEBUG */ - (ulong) mutex->waiters); + (ulong) mutex->waiters); + } } else if (type == RW_LOCK_EX || type == RW_LOCK_WAIT_EX @@ -478,33 +509,36 @@ sync_array_cell_print( rwlock = cell->old_wait_rw_lock; - fprintf(file, - " RW-latch at %p created in file %s line %lu\n", - (void*) rwlock, innobase_basename(rwlock->cfile_name), - (ulong) rwlock->cline); - writer = rw_lock_get_writer(rwlock); - if (writer != RW_LOCK_NOT_LOCKED) { + if (rwlock) { fprintf(file, - "a writer (thread id %lu) has" - " reserved it in mode %s", - (ulong) os_thread_pf(rwlock->writer_thread), - writer == RW_LOCK_EX - ? 
" exclusive\n" - : " wait exclusive\n"); - } + " RW-latch at %p created in file %s line %lu\n", + (void*) rwlock, innobase_basename(rwlock->cfile_name), + (ulong) rwlock->cline); + writer = rw_lock_get_writer(rwlock); + if (writer != RW_LOCK_NOT_LOCKED) { + fprintf(file, + "a writer (thread id %lu) has" + " reserved it in mode %s", + (ulong) os_thread_pf(rwlock->writer_thread), + writer == RW_LOCK_EX + ? " exclusive\n" + : " wait exclusive\n"); + *reserver = rwlock->writer_thread; + } - fprintf(file, - "number of readers %lu, waiters flag %lu, " - "lock_word: %lx\n" - "Last time read locked in file %s line %lu\n" - "Last time write locked in file %s line %lu\n", - (ulong) rw_lock_get_reader_count(rwlock), - (ulong) rwlock->waiters, - rwlock->lock_word, - innobase_basename(rwlock->last_s_file_name), - (ulong) rwlock->last_s_line, - rwlock->last_x_file_name, - (ulong) rwlock->last_x_line); + fprintf(file, + "number of readers %lu, waiters flag %lu, " + "lock_word: %lx\n" + "Last time read locked in file %s line %lu\n" + "Last time write locked in file %s line %lu\n", + (ulong) rw_lock_get_reader_count(rwlock), + (ulong) rwlock->waiters, + rwlock->lock_word, + innobase_basename(rwlock->last_s_file_name), + (ulong) rwlock->last_s_line, + rwlock->last_x_file_name, + (ulong) rwlock->last_x_line); + } } else { ut_error; } @@ -515,32 +549,6 @@ sync_array_cell_print( } #ifdef UNIV_SYNC_DEBUG -/******************************************************************//** -Looks for a cell with the given thread id. 
-@return pointer to cell or NULL if not found */ -static -sync_cell_t* -sync_array_find_thread( -/*===================*/ - sync_array_t* arr, /*!< in: wait array */ - os_thread_id_t thread) /*!< in: thread id */ -{ - ulint i; - sync_cell_t* cell; - - for (i = 0; i < arr->n_cells; i++) { - - cell = sync_array_get_nth_cell(arr, i); - - if (cell->wait_object != NULL - && os_thread_eq(cell->thread, thread)) { - - return(cell); /* Found */ - } - } - - return(NULL); /* Not found */ -} /******************************************************************//** Recursion step for deadlock detection. @@ -602,6 +610,7 @@ sync_array_detect_deadlock( os_thread_id_t thread; ibool ret; rw_lock_debug_t*debug; + os_thread_id_t reserver=0; ut_a(arr); ut_a(start); @@ -637,10 +646,10 @@ sync_array_detect_deadlock( depth); if (ret) { fprintf(stderr, - "Mutex %p owned by thread %lu file %s line %lu\n", + "Mutex %p owned by thread %lu file %s line %lu\n", mutex, (ulong) os_thread_pf(mutex->thread_id), mutex->file_name, (ulong) mutex->line); - sync_array_cell_print(stderr, cell); + sync_array_cell_print(stderr, cell, &reserver); return(TRUE); } @@ -678,7 +687,7 @@ sync_array_detect_deadlock( print: fprintf(stderr, "rw-lock %p ", (void*) lock); - sync_array_cell_print(stderr, cell); + sync_array_cell_print(stderr, cell, &reserver); rw_lock_debug_print(stderr, debug); return(TRUE); } @@ -921,6 +930,7 @@ sync_array_print_long_waits_low( double diff; sync_cell_t* cell; void* wait_object; + os_thread_id_t reserver=0; cell = sync_array_get_nth_cell(arr, i); @@ -936,7 +946,7 @@ sync_array_print_long_waits_low( if (diff > SYNC_ARRAY_TIMEOUT) { fputs("InnoDB: Warning: a long semaphore wait:\n", stderr); - sync_array_cell_print(stderr, cell); + sync_array_cell_print(stderr, cell, &reserver); *noticed = TRUE; } @@ -951,6 +961,60 @@ sync_array_print_long_waits_low( } } + /* We found a long semaphore wait, wait all threads that are + waiting for a semaphore. 
*/ + if (*noticed) { + for (i = 0; i < arr->n_cells; i++) { + void* wait_object; + os_thread_id_t reserver=(os_thread_id_t)ULINT_UNDEFINED; + sync_cell_t* cell; + ulint loop = 0; + + cell = sync_array_get_nth_cell(arr, i); + + wait_object = cell->wait_object; + + if (wait_object == NULL || !cell->waiting) { + + continue; + } + + fputs("InnoDB: Warning: semaphore wait:\n", + stderr); + sync_array_cell_print(stderr, cell, &reserver); + + /* Try to output cell information for writer recursive way */ + while (reserver != (os_thread_id_t)ULINT_UNDEFINED) { + sync_cell_t* reserver_wait; + + reserver_wait = sync_array_find_thread(arr, reserver); + + if (reserver_wait && + reserver_wait->wait_object != NULL && + reserver_wait->waiting) { + fputs("InnoDB: Warning: Writer thread is waiting this semaphore:\n", + stderr); + reserver = (os_thread_id_t)ULINT_UNDEFINED; + sync_array_cell_print(stderr, reserver_wait, &reserver); + loop++; + + if (reserver_wait->thread == reserver) { + reserver = (os_thread_id_t)ULINT_UNDEFINED; + } + } else { + reserver = (os_thread_id_t)ULINT_UNDEFINED; + } + + /* This is protection against loop */ + if (loop > 100) { + fputs("InnoDB: Warning: Too many waiting threads.\n", stderr); + break; + } + + } + } + } + #undef SYNC_ARRAY_TIMEOUT return(fatal); @@ -1030,6 +1094,7 @@ sync_array_print_info_low( { ulint i; ulint count = 0; + os_thread_id_t r = 0; fprintf(file, "OS WAIT ARRAY INFO: reservation count %ld\n", @@ -1042,7 +1107,7 @@ sync_array_print_info_low( if (cell->wait_object != NULL) { count++; - sync_array_cell_print(file, cell); + sync_array_cell_print(file, cell, &r); } } } diff --git a/storage/innobase/sync/sync0rw.cc b/storage/innobase/sync/sync0rw.cc index ebf73917702..e129d39fc9d 100644 --- a/storage/innobase/sync/sync0rw.cc +++ b/storage/innobase/sync/sync0rw.cc @@ -151,18 +151,12 @@ UNIV_INTERN mysql_pfs_key_t rw_lock_mutex_key; To modify the debug info list of an rw-lock, this mutex has to be acquired in addition to the mutex 
protecting the lock. */ -UNIV_INTERN ib_mutex_t rw_lock_debug_mutex; +UNIV_INTERN os_fast_mutex_t rw_lock_debug_mutex; # ifdef UNIV_PFS_MUTEX UNIV_INTERN mysql_pfs_key_t rw_lock_debug_mutex_key; # endif -/* If deadlock detection does not get immediately the mutex, -it may wait for this event */ -UNIV_INTERN os_event_t rw_lock_debug_event; -/* This is set to TRUE, if there may be waiters for the event */ -UNIV_INTERN ibool rw_lock_debug_waiters; - /******************************************************************//** Creates a debug info struct. */ static @@ -690,22 +684,7 @@ void rw_lock_debug_mutex_enter(void) /*===========================*/ { -loop: - if (0 == mutex_enter_nowait(&rw_lock_debug_mutex)) { - return; - } - - os_event_reset(rw_lock_debug_event); - - rw_lock_debug_waiters = TRUE; - - if (0 == mutex_enter_nowait(&rw_lock_debug_mutex)) { - return; - } - - os_event_wait(rw_lock_debug_event); - - goto loop; + os_fast_mutex_lock(&rw_lock_debug_mutex); } /******************************************************************//** @@ -715,12 +694,7 @@ void rw_lock_debug_mutex_exit(void) /*==========================*/ { - mutex_exit(&rw_lock_debug_mutex); - - if (rw_lock_debug_waiters) { - rw_lock_debug_waiters = FALSE; - os_event_set(rw_lock_debug_event); - } + os_fast_mutex_unlock(&rw_lock_debug_mutex); } /******************************************************************//** diff --git a/storage/innobase/sync/sync0sync.cc b/storage/innobase/sync/sync0sync.cc index 5ef8a02fb3f..54018471abc 100644 --- a/storage/innobase/sync/sync0sync.cc +++ b/storage/innobase/sync/sync0sync.cc @@ -1472,11 +1472,7 @@ sync_init(void) SYNC_NO_ORDER_CHECK); #ifdef UNIV_SYNC_DEBUG - mutex_create(rw_lock_debug_mutex_key, &rw_lock_debug_mutex, - SYNC_NO_ORDER_CHECK); - - rw_lock_debug_event = os_event_create(); - rw_lock_debug_waiters = FALSE; + os_fast_mutex_init(rw_lock_debug_mutex_key, &rw_lock_debug_mutex); #endif /* UNIV_SYNC_DEBUG */ } @@ -1544,6 +1540,7 @@ sync_close(void) 
sync_order_checks_on = FALSE; sync_thread_level_arrays_free(); + os_fast_mutex_free(&rw_lock_debug_mutex); #endif /* UNIV_SYNC_DEBUG */ sync_initialized = FALSE; @@ -1558,12 +1555,12 @@ sync_print_wait_info( FILE* file) /*!< in: file where to print */ { fprintf(file, - "Mutex spin waits "UINT64PF", rounds "UINT64PF", " - "OS waits "UINT64PF"\n" - "RW-shared spins "UINT64PF", rounds "UINT64PF", " - "OS waits "UINT64PF"\n" - "RW-excl spins "UINT64PF", rounds "UINT64PF", " - "OS waits "UINT64PF"\n", + "Mutex spin waits " UINT64PF ", rounds " UINT64PF ", " + "OS waits " UINT64PF "\n" + "RW-shared spins " UINT64PF ", rounds " UINT64PF ", " + "OS waits " UINT64PF "\n" + "RW-excl spins " UINT64PF ", rounds " UINT64PF ", " + "OS waits " UINT64PF "\n", (ib_uint64_t) mutex_spin_wait_count, (ib_uint64_t) mutex_spin_round_count, (ib_uint64_t) mutex_os_wait_count, diff --git a/storage/innobase/trx/trx0i_s.cc b/storage/innobase/trx/trx0i_s.cc index f6360562ae7..01ccfb8a6d0 100644 --- a/storage/innobase/trx/trx0i_s.cc +++ b/storage/innobase/trx/trx0i_s.cc @@ -1639,7 +1639,7 @@ trx_i_s_create_lock_id( } else { /* table lock */ res_len = ut_snprintf(lock_id, lock_id_size, - TRX_ID_FMT":"UINT64PF, + TRX_ID_FMT":" UINT64PF, row->lock_trx_id, row->lock_table_id); } diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc index 38b21d8d428..272f8377f68 100644 --- a/storage/innobase/trx/trx0trx.cc +++ b/storage/innobase/trx/trx0trx.cc @@ -50,6 +50,9 @@ Created 3/26/1996 Heikki Tuuri #include<set> +extern "C" +int thd_deadlock_victim_preference(const MYSQL_THD thd1, const MYSQL_THD thd2); + /** Set of table_id */ typedef std::set<table_id_t> table_id_set; @@ -1833,7 +1836,7 @@ state_ok: if (trx->undo_no != 0) { newline = TRUE; - fprintf(f, ", undo log entries "TRX_ID_FMT, trx->undo_no); + fprintf(f, ", undo log entries " TRX_ID_FMT, trx->undo_no); } if (newline) { @@ -1936,9 +1939,8 @@ trx_assert_started( #endif /* UNIV_DEBUG */ 
/*******************************************************************//** -Compares the "weight" (or size) of two transactions. Transactions that -have edited non-transactional tables are considered heavier than ones -that have not. +Compares the "weight" (or size) of two transactions. The heavier the weight, +the more reluctant we will be to choose the transaction as a deadlock victim. @return TRUE if weight(a) >= weight(b) */ UNIV_INTERN ibool @@ -1947,26 +1949,19 @@ trx_weight_ge( const trx_t* a, /*!< in: the first transaction to be compared */ const trx_t* b) /*!< in: the second transaction to be compared */ { - ibool a_notrans_edit; - ibool b_notrans_edit; - - /* If mysql_thd is NULL for a transaction we assume that it has - not edited non-transactional tables. */ - - a_notrans_edit = a->mysql_thd != NULL - && thd_has_edited_nontrans_tables(a->mysql_thd); - - b_notrans_edit = b->mysql_thd != NULL - && thd_has_edited_nontrans_tables(b->mysql_thd); - - if (a_notrans_edit != b_notrans_edit) { + int pref; - return(a_notrans_edit); + /* First ask the upper server layer if it has any preference for which + to prefer as a deadlock victim. */ + pref= thd_deadlock_victim_preference(a->mysql_thd, b->mysql_thd); + if (pref < 0) { + return FALSE; + } else if (pref > 0) { + return TRUE; } - /* Either both had edited non-transactional tables or both had - not, we fall back to comparing the number of altered/locked - rows. */ + /* Upper server layer had no preference, we fall back to comparing the + number of altered/locked rows. 
*/ #if 0 fprintf(stderr, @@ -2133,7 +2128,7 @@ trx_recover_for_mysql( ut_print_timestamp(stderr); fprintf(stderr, " InnoDB: Transaction contains changes" - " to "TRX_ID_FMT" rows\n", + " to " TRX_ID_FMT " rows\n", trx->undo_no); count++; diff --git a/storage/maria/ha_maria.cc b/storage/maria/ha_maria.cc index 081e90f89f7..7155faa72f4 100644 --- a/storage/maria/ha_maria.cc +++ b/storage/maria/ha_maria.cc @@ -57,7 +57,7 @@ C_MODE_END #endif #define THD_TRN (*(TRN **)thd_ha_data(thd, maria_hton)) -ulong pagecache_division_limit, pagecache_age_threshold; +ulong pagecache_division_limit, pagecache_age_threshold, pagecache_file_hash_size; ulonglong pagecache_buffer_size; const char *zerofill_error_msg= "Table is from another system and must be zerofilled or repaired to be " @@ -250,6 +250,13 @@ static MYSQL_SYSVAR_ULONG(pagecache_division_limit, pagecache_division_limit, "The minimum percentage of warm blocks in key cache", 0, 0, 100, 1, 100, 1); +static MYSQL_SYSVAR_ULONG(pagecache_file_hash_size, pagecache_file_hash_size, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Number of hash buckets for open and changed files. If you have a lot of Aria " + "files open you should increase this for faster flush of changes. A good " + "value is probably 1/10 of number of possible open Aria files.", 0,0, + 512, 128, 16384, 1); + static MYSQL_SYSVAR_SET(recover, maria_recover_options, PLUGIN_VAR_OPCMDARG, "Specifies how corrupted tables should be automatically repaired." " Possible values are one or more of \"NORMAL\" (the default), " @@ -1236,6 +1243,14 @@ int ha_maria::open(const char *name, int mode, uint test_if_locked) table->key_info[i].block_size= file->s->keyinfo[i].block_length; } my_errno= 0; + + /* Count statistics of usage for newly open normal files */ + if (file->s->reopen == 1 && ! 
(test_if_locked & HA_OPEN_TMP_TABLE)) + { + if (file->s->delay_key_write) + feature_files_opened_with_delayed_keys++; + } + return my_errno; } @@ -2819,7 +2834,8 @@ int ha_maria::implicit_commit(THD *thd, bool new_trn) TRN *trn; int error; uint locked_tables; - TABLE *table; + DYNAMIC_ARRAY used_tables; + DBUG_ENTER("ha_maria::implicit_commit"); if (!maria_hton || !(trn= THD_TRN)) DBUG_RETURN(0); @@ -2835,7 +2851,38 @@ int ha_maria::implicit_commit(THD *thd, bool new_trn) DBUG_PRINT("info", ("locked_tables, skipping")); DBUG_RETURN(0); } + locked_tables= trnman_has_locked_tables(trn); + + if (new_trn && trn && trn->used_tables) + { + MARIA_USED_TABLES *tables; + /* + Save locked tables so that we can move them to another transaction + We are using a dynamic array as locked_tables in some cases can be + smaller than the used_tables list (for example when the server does + early unlock of tables. + */ + + my_init_dynamic_array2(&used_tables, sizeof(MARIA_SHARE*), (void*) 0, + locked_tables, 8, MYF(MY_THREAD_SPECIFIC)); + for (tables= (MARIA_USED_TABLES*) trn->used_tables; + tables; + tables= tables->next) + { + if (tables->share->base.born_transactional) + { + if (insert_dynamic(&used_tables, (uchar*) &tables->share)) + { + error= HA_ERR_OUT_OF_MEM; + goto end_and_free; + } + } + } + } + else + bzero(&used_tables, sizeof(used_tables)); + error= 0; if (unlikely(ma_commit(trn))) error= 1; @@ -2859,7 +2906,7 @@ int ha_maria::implicit_commit(THD *thd, bool new_trn) if (unlikely(trn == NULL)) { error= HA_ERR_OUT_OF_MEM; - goto end; + goto end_and_free; } /* Move all locked tables to the new transaction @@ -2868,13 +2915,21 @@ int ha_maria::implicit_commit(THD *thd, bool new_trn) when we should call _ma_setup_live_state() and in some cases, like in check table, we use the table without calling start_stmt(). 
*/ - for (table=thd->open_tables; table ; table=table->next) + + uint i; + for (i= 0 ; i < used_tables.elements ; i++) { - if (table->db_stat && table->file->ht == maria_hton) + MARIA_SHARE *share; + LIST *handlers; + + share= *(dynamic_element(&used_tables, i, MARIA_SHARE**)); + /* Find table instances that was used in this transaction */ + for (handlers= share->open_list; handlers; handlers= handlers->next) { - MARIA_HA *handler= ((ha_maria*) table->file)->file; - if (handler->s->base.born_transactional) - { + MARIA_HA *handler= (MARIA_HA*) handlers->data; + if (handler->external_ref && + ((TABLE*) handler->external_ref)->in_use == thd) + { _ma_set_trn_for_table(handler, trn); /* If handler uses versioning */ if (handler->s->lock_key_trees) @@ -2888,6 +2943,8 @@ int ha_maria::implicit_commit(THD *thd, bool new_trn) /* This is just a commit, tables stay locked if they were: */ trnman_reset_locked_tables(trn, locked_tables); +end_and_free: + delete_dynamic(&used_tables); end: DBUG_RETURN(error); } @@ -3520,10 +3577,11 @@ static int ha_maria_init(void *p) mark_recovery_start(log_dir)) || !init_pagecache(maria_pagecache, (size_t) pagecache_buffer_size, pagecache_division_limit, - pagecache_age_threshold, maria_block_size, 0) || + pagecache_age_threshold, maria_block_size, pagecache_file_hash_size, + 0) || !init_pagecache(maria_log_pagecache, TRANSLOG_PAGECACHE_SIZE, 0, 0, - TRANSLOG_PAGE_SIZE, 0) || + TRANSLOG_PAGE_SIZE, 0, 0) || translog_init(maria_data_root, log_file_size, MYSQL_VERSION_ID, server_id, maria_log_pagecache, TRANSLOG_DEFAULT_FLAGS, 0) || @@ -3639,6 +3697,7 @@ struct st_mysql_sys_var* system_variables[]= { MYSQL_SYSVAR(pagecache_age_threshold), MYSQL_SYSVAR(pagecache_buffer_size), MYSQL_SYSVAR(pagecache_division_limit), + MYSQL_SYSVAR(pagecache_file_hash_size), MYSQL_SYSVAR(recover), MYSQL_SYSVAR(repair_threads), MYSQL_SYSVAR(sort_buffer_size), @@ -3870,6 +3929,6 @@ maria_declare_plugin(aria) status_variables, /* status variables */ system_variables, 
/* system variables */ "1.5", /* string version */ - MariaDB_PLUGIN_MATURITY_GAMMA /* maturity */ + MariaDB_PLUGIN_MATURITY_STABLE /* maturity */ } maria_declare_plugin_end; diff --git a/storage/maria/ma_checkpoint.c b/storage/maria/ma_checkpoint.c index e4adf5fbccd..de8a9610a64 100644 --- a/storage/maria/ma_checkpoint.c +++ b/storage/maria/ma_checkpoint.c @@ -230,7 +230,7 @@ static int really_execute_checkpoint(void) sizeof(checkpoint_start_log_horizon_char); for (i= 0; i < (sizeof(record_pieces)/sizeof(record_pieces[0])); i++) { - log_array[TRANSLOG_INTERNAL_PARTS + 1 + i].str= record_pieces[i].str; + log_array[TRANSLOG_INTERNAL_PARTS + 1 + i].str= (uchar*) record_pieces[i].str; log_array[TRANSLOG_INTERNAL_PARTS + 1 + i].length= record_pieces[i].length; total_rec_length+= (translog_size_t) record_pieces[i].length; } diff --git a/storage/maria/ma_close.c b/storage/maria/ma_close.c index dd3a034425a..4532b029126 100644 --- a/storage/maria/ma_close.c +++ b/storage/maria/ma_close.c @@ -80,7 +80,10 @@ int maria_close(register MARIA_HA *info) } flag= !--share->reopen; if (!internal_table) - maria_open_list=list_delete(maria_open_list,&info->open_list); + { + maria_open_list= list_delete(maria_open_list,&info->open_list); + share->open_list= list_delete(share->open_list, &info->share_list); + } my_free(info->rec_buff); (*share->end)(info); @@ -91,6 +94,7 @@ int maria_close(register MARIA_HA *info) /* Check that we don't have any dangling pointers from the transaction */ DBUG_ASSERT(share->in_trans == 0); + DBUG_ASSERT(share->open_list == 0); if (share->kfile.file >= 0) { diff --git a/storage/maria/ma_open.c b/storage/maria/ma_open.c index e06084cef07..1d274d796be 100644 --- a/storage/maria/ma_open.c +++ b/storage/maria/ma_open.c @@ -211,8 +211,9 @@ static MARIA_HA *maria_clone_internal(MARIA_SHARE *share, const char *name, if (!internal_table) { - m_info->open_list.data=(void*) m_info; - maria_open_list=list_add(maria_open_list,&m_info->open_list); + 
m_info->open_list.data= m_info->share_list.data= (void*) m_info; + maria_open_list= list_add(maria_open_list, &m_info->open_list); + share->open_list= list_add(share->open_list, &m_info->share_list); } else { diff --git a/storage/maria/ma_pagecache.c b/storage/maria/ma_pagecache.c index 8e8ecf945f0..bb085bbdc7a 100644 --- a/storage/maria/ma_pagecache.c +++ b/storage/maria/ma_pagecache.c @@ -502,7 +502,7 @@ static void test_key_cache(PAGECACHE *pagecache, #define PAGECACHE_HASH(p, f, pos) (((ulong) (pos) + \ (ulong) (f).file) & (p->hash_entries-1)) -#define FILE_HASH(f) ((uint) (f).file & (PAGECACHE_CHANGED_BLOCKS_HASH - 1)) +#define FILE_HASH(f,cache) ((uint) (f).file & (cache->changed_blocks_hash_size-1)) #define DEFAULT_PAGECACHE_DEBUG_LOG "pagecache_debug.log" @@ -743,7 +743,8 @@ static inline uint next_power(uint value) ulong init_pagecache(PAGECACHE *pagecache, size_t use_mem, uint division_limit, uint age_threshold, - uint block_size, myf my_readwrite_flags) + uint block_size, uint changed_blocks_hash_size, + myf my_readwrite_flags) { ulong blocks, hash_links, length; int error; @@ -786,6 +787,10 @@ ulong init_pagecache(PAGECACHE *pagecache, size_t use_mem, 2 * sizeof(PAGECACHE_HASH_LINK) + sizeof(PAGECACHE_HASH_LINK*) * 5/4 + block_size)); + /* Changed blocks hash needs to be a power of 2 */ + changed_blocks_hash_size= my_round_up_to_next_power(MY_MAX(changed_blocks_hash_size, + MIN_PAGECACHE_CHANGED_BLOCKS_HASH_SIZE)); + /* We need to support page cache with just one block to be able to do scanning of rows-in-block files @@ -809,10 +814,11 @@ ulong init_pagecache(PAGECACHE *pagecache, size_t use_mem, hash_links= MAX_THREADS + blocks - 1; #endif while ((length= (ALIGN_SIZE(blocks * sizeof(PAGECACHE_BLOCK_LINK)) + - ALIGN_SIZE(hash_links * sizeof(PAGECACHE_HASH_LINK)) + ALIGN_SIZE(sizeof(PAGECACHE_HASH_LINK*) * - pagecache->hash_entries))) + - (blocks << pagecache->shift) > use_mem) + pagecache->hash_entries) + + ALIGN_SIZE(hash_links * 
sizeof(PAGECACHE_HASH_LINK)) + + sizeof(PAGECACHE_BLOCK_LINK*)* (changed_blocks_hash_size*2))) + + (blocks << pagecache->shift) > use_mem && blocks > 8) blocks--; /* Allocate memory for cache page buffers */ if ((pagecache->block_mem= @@ -823,8 +829,17 @@ ulong init_pagecache(PAGECACHE *pagecache, size_t use_mem, Allocate memory for blocks, hash_links and hash entries; For each block 2 hash links are allocated */ - if ((pagecache->block_root= - (PAGECACHE_BLOCK_LINK*) my_malloc((size_t) length, MYF(0)))) + if (my_multi_malloc(MYF(MY_ZEROFILL), + &pagecache->block_root, blocks * sizeof(PAGECACHE_BLOCK_LINK), + &pagecache->hash_root, + sizeof(PAGECACHE_HASH_LINK*) * pagecache->hash_entries, + &pagecache->hash_link_root, + hash_links * sizeof(PAGECACHE_HASH_LINK), + &pagecache->changed_blocks, + sizeof(PAGECACHE_BLOCK_LINK*) * changed_blocks_hash_size, + &pagecache->file_blocks, + sizeof(PAGECACHE_BLOCK_LINK*) * changed_blocks_hash_size, + NullS)) break; my_large_free(pagecache->block_mem); pagecache->block_mem= 0; @@ -834,19 +849,6 @@ ulong init_pagecache(PAGECACHE *pagecache, size_t use_mem, pagecache->blocks_unused= blocks; pagecache->disk_blocks= (long) blocks; pagecache->hash_links= hash_links; - pagecache->hash_root= - (PAGECACHE_HASH_LINK**) ((char*) pagecache->block_root + - ALIGN_SIZE(blocks*sizeof(PAGECACHE_BLOCK_LINK))); - pagecache->hash_link_root= - (PAGECACHE_HASH_LINK*) ((char*) pagecache->hash_root + - ALIGN_SIZE((sizeof(PAGECACHE_HASH_LINK*) * - pagecache->hash_entries))); - bzero((uchar*) pagecache->block_root, - pagecache->disk_blocks * sizeof(PAGECACHE_BLOCK_LINK)); - bzero((uchar*) pagecache->hash_root, - pagecache->hash_entries * sizeof(PAGECACHE_HASH_LINK*)); - bzero((uchar*) pagecache->hash_link_root, - pagecache->hash_links * sizeof(PAGECACHE_HASH_LINK)); pagecache->hash_links_used= 0; pagecache->free_hash_list= NULL; pagecache->blocks_used= pagecache->blocks_changed= 0; @@ -866,6 +868,7 @@ ulong init_pagecache(PAGECACHE *pagecache, size_t 
use_mem, pagecache->age_threshold= (age_threshold ? blocks * age_threshold / 100 : blocks); + pagecache->changed_blocks_hash_size= changed_blocks_hash_size; pagecache->cnt_for_resize_op= 0; pagecache->resize_in_flush= 0; @@ -879,12 +882,6 @@ ulong init_pagecache(PAGECACHE *pagecache, size_t use_mem, pagecache->disk_blocks, (long) pagecache->block_root, pagecache->hash_entries, (long) pagecache->hash_root, pagecache->hash_links, (long) pagecache->hash_link_root)); - bzero((uchar*) pagecache->changed_blocks, - sizeof(pagecache->changed_blocks[0]) * - PAGECACHE_CHANGED_BLOCKS_HASH); - bzero((uchar*) pagecache->file_blocks, - sizeof(pagecache->file_blocks[0]) * - PAGECACHE_CHANGED_BLOCKS_HASH); pagecache->blocks= pagecache->disk_blocks > 0 ? pagecache->disk_blocks : 0; DBUG_RETURN((ulong) pagecache->disk_blocks); @@ -980,12 +977,11 @@ static int flush_all_key_blocks(PAGECACHE *pagecache) #if NOT_USED /* keep disabled until code is fixed see above !! */ ulong resize_pagecache(PAGECACHE *pagecache, size_t use_mem, uint division_limit, - uint age_threshold) + uint age_threshold, uint changed_blocks_hash_size) { ulong blocks; struct st_my_thread_var *thread; WQUEUE *wqueue; - DBUG_ENTER("resize_pagecache"); if (!pagecache->inited) @@ -1028,7 +1024,7 @@ ulong resize_pagecache(PAGECACHE *pagecache, end_pagecache(pagecache, 0); /* Don't free mutex */ /* The following will work even if use_mem is 0 */ blocks= init_pagecache(pagecache, pagecache->block_size, use_mem, - division_limit, age_threshold, + division_limit, age_threshold, changed_blocks_hash_size, pagecache->readwrite_flags); finish: @@ -1237,7 +1233,7 @@ static void link_to_file_list(PAGECACHE *pagecache, { if (unlink_flag) unlink_changed(block); - link_changed(block, &pagecache->file_blocks[FILE_HASH(*file)]); + link_changed(block, &pagecache->file_blocks[FILE_HASH(*file, pagecache)]); if (block->status & PCBLOCK_CHANGED) { block->status&= ~(PCBLOCK_CHANGED | PCBLOCK_DEL_WRITE); @@ -1258,7 +1254,7 @@ static inline 
void link_to_changed_list(PAGECACHE *pagecache, { unlink_changed(block); link_changed(block, - &pagecache->changed_blocks[FILE_HASH(block->hash_link->file)]); + &pagecache->changed_blocks[FILE_HASH(block->hash_link->file, pagecache)]); block->status|=PCBLOCK_CHANGED; pagecache->blocks_changed++; pagecache->global_blocks_changed++; @@ -4578,7 +4574,7 @@ static int flush_pagecache_blocks_int(PAGECACHE *pagecache, Count how many key blocks we have to cache to be able to flush all dirty pages with minimum seek moves. */ - for (block= pagecache->changed_blocks[FILE_HASH(*file)] ; + for (block= pagecache->changed_blocks[FILE_HASH(*file, pagecache)] ; block; block= block->next_changed) { @@ -4603,7 +4599,7 @@ static int flush_pagecache_blocks_int(PAGECACHE *pagecache, /* Retrieve the blocks and write them to a buffer to be flushed */ restart: end= (pos= cache)+count; - for (block= pagecache->changed_blocks[FILE_HASH(*file)] ; + for (block= pagecache->changed_blocks[FILE_HASH(*file, pagecache)] ; block; block= next) { @@ -4729,7 +4725,7 @@ restart: #if defined(PAGECACHE_DEBUG) cnt=0; #endif - for (block= pagecache->file_blocks[FILE_HASH(*file)] ; + for (block= pagecache->file_blocks[FILE_HASH(*file, pagecache)] ; block; block= next) { @@ -4918,7 +4914,7 @@ my_bool pagecache_collect_changed_blocks_with_lsn(PAGECACHE *pagecache, } /* Count how many dirty pages are interesting */ - for (file_hash= 0; file_hash < PAGECACHE_CHANGED_BLOCKS_HASH; file_hash++) + for (file_hash= 0; file_hash < pagecache->changed_blocks_hash_size; file_hash++) { PAGECACHE_BLOCK_LINK *block; for (block= pagecache->changed_blocks[file_hash] ; @@ -4957,7 +4953,7 @@ my_bool pagecache_collect_changed_blocks_with_lsn(PAGECACHE *pagecache, DBUG_PRINT("info", ("found %lu dirty pages", stored_list_size)); if (stored_list_size == 0) goto end; - for (file_hash= 0; file_hash < PAGECACHE_CHANGED_BLOCKS_HASH; file_hash++) + for (file_hash= 0; file_hash < pagecache->changed_blocks_hash_size; file_hash++) { 
PAGECACHE_BLOCK_LINK *block; for (block= pagecache->changed_blocks[file_hash] ; @@ -5008,7 +5004,7 @@ void pagecache_file_no_dirty_page(PAGECACHE *pagecache, PAGECACHE_FILE *file) { File fd= file->file; PAGECACHE_BLOCK_LINK *block; - for (block= pagecache->changed_blocks[FILE_HASH(*file)]; + for (block= pagecache->changed_blocks[FILE_HASH(*file, pagecache)]; block != NULL; block= block->next_changed) if (block->hash_link->file.file == fd) diff --git a/storage/maria/ma_pagecache.h b/storage/maria/ma_pagecache.h index 8460eaddc57..f7ddb2fe716 100644 --- a/storage/maria/ma_pagecache.h +++ b/storage/maria/ma_pagecache.h @@ -104,7 +104,9 @@ typedef struct st_pagecache_hash_link PAGECACHE_HASH_LINK; #include <wqueue.h> -#define PAGECACHE_CHANGED_BLOCKS_HASH 128 /* must be power of 2 */ +/* Default size of hash for changed files */ +#define MIN_PAGECACHE_CHANGED_BLOCKS_HASH_SIZE 512 + #define PAGECACHE_PRIORITY_LOW 0 #define PAGECACHE_PRIORITY_DEFAULT 3 #define PAGECACHE_PRIORITY_HIGH 6 @@ -121,6 +123,7 @@ typedef struct st_pagecache ulong age_threshold; /* age threshold for hot blocks */ ulonglong time; /* total number of block link operations */ ulong hash_entries; /* max number of entries in the hash table */ + ulong changed_blocks_hash_size; /* Number of hash buckets for file blocks */ long hash_links; /* max number of hash links */ long hash_links_used; /* number of hash links taken from free links pool */ long disk_blocks; /* max number of blocks in the cache */ @@ -145,9 +148,9 @@ typedef struct st_pagecache WQUEUE waiting_for_hash_link;/* waiting for a free hash link */ WQUEUE waiting_for_block; /* requests waiting for a free block */ /* hash for dirty file bl.*/ - PAGECACHE_BLOCK_LINK *changed_blocks[PAGECACHE_CHANGED_BLOCKS_HASH]; + PAGECACHE_BLOCK_LINK **changed_blocks; /* hash for other file bl.*/ - PAGECACHE_BLOCK_LINK *file_blocks[PAGECACHE_CHANGED_BLOCKS_HASH]; + PAGECACHE_BLOCK_LINK **file_blocks; /* The following variables are and variables used to hold 
parameters for @@ -195,10 +198,11 @@ extern PAGECACHE dflt_pagecache_var, *dflt_pagecache; extern ulong init_pagecache(PAGECACHE *pagecache, size_t use_mem, uint division_limit, uint age_threshold, - uint block_size, myf my_read_flags); + uint block_size, uint changed_blocks_hash_size, + myf my_read_flags); extern ulong resize_pagecache(PAGECACHE *pagecache, size_t use_mem, uint division_limit, - uint age_threshold); + uint age_threshold, uint changed_blocks_hash_size); extern void change_pagecache_param(PAGECACHE *pagecache, uint division_limit, uint age_threshold); diff --git a/storage/maria/ma_rt_test.c b/storage/maria/ma_rt_test.c index 29244bab6ce..9d8574212ca 100644 --- a/storage/maria/ma_rt_test.c +++ b/storage/maria/ma_rt_test.c @@ -100,11 +100,11 @@ int main(int argc, char *argv[]) /* Maria requires that we always have a page cache */ if (maria_init() || (init_pagecache(maria_pagecache, maria_block_size * 16, 0, 0, - maria_block_size, MY_WME) == 0) || + maria_block_size, 0, MY_WME) == 0) || ma_control_file_open(TRUE, TRUE) || (init_pagecache(maria_log_pagecache, TRANSLOG_PAGECACHE_SIZE, 0, 0, - TRANSLOG_PAGE_SIZE, MY_WME) == 0) || + TRANSLOG_PAGE_SIZE, 0, MY_WME) == 0) || translog_init(maria_data_root, TRANSLOG_FILE_SIZE, 0, 0, maria_log_pagecache, TRANSLOG_DEFAULT_FLAGS, 0) || diff --git a/storage/maria/ma_state.c b/storage/maria/ma_state.c index f130da21d07..0c673ded04e 100644 --- a/storage/maria/ma_state.c +++ b/storage/maria/ma_state.c @@ -240,6 +240,7 @@ void _ma_reset_state(MARIA_HA *info) MARIA_STATE_HISTORY *history= share->state_history; DBUG_ENTER("_ma_reset_state"); + /* Always true if share->now_transactional is set */ if (history) { MARIA_STATE_HISTORY *next; @@ -769,7 +770,7 @@ void _ma_copy_nontrans_state_information(MARIA_HA *info) /** Reset history - This is only called during repair when we the only one using the table. + This is only called during repair when we are the only one using the table. 
*/ void _ma_reset_history(MARIA_SHARE *share) diff --git a/storage/maria/ma_test1.c b/storage/maria/ma_test1.c index 595b87ef4d0..901a7ef06e3 100644 --- a/storage/maria/ma_test1.c +++ b/storage/maria/ma_test1.c @@ -79,11 +79,11 @@ int main(int argc,char *argv[]) /* Maria requires that we always have a page cache */ if (maria_init() || (init_pagecache(maria_pagecache, maria_block_size * 16, 0, 0, - maria_block_size, MY_WME) == 0) || + maria_block_size, 0, MY_WME) == 0) || ma_control_file_open(TRUE, TRUE) || (init_pagecache(maria_log_pagecache, TRANSLOG_PAGECACHE_SIZE, 0, 0, - TRANSLOG_PAGE_SIZE, MY_WME) == 0) || + TRANSLOG_PAGE_SIZE, 0, MY_WME) == 0) || translog_init(maria_data_root, TRANSLOG_FILE_SIZE, 0, 0, maria_log_pagecache, TRANSLOG_DEFAULT_FLAGS, 0) || diff --git a/storage/maria/ma_test2.c b/storage/maria/ma_test2.c index 52c0839cff6..709a190c1a7 100644 --- a/storage/maria/ma_test2.c +++ b/storage/maria/ma_test2.c @@ -91,11 +91,11 @@ int main(int argc, char *argv[]) /* Maria requires that we always have a page cache */ if (maria_init() || (init_pagecache(maria_pagecache, pagecache_size, 0, 0, - maria_block_size, MY_WME) == 0) || + maria_block_size, 0, MY_WME) == 0) || ma_control_file_open(TRUE, TRUE) || (init_pagecache(maria_log_pagecache, TRANSLOG_PAGECACHE_SIZE, 0, 0, - TRANSLOG_PAGE_SIZE, MY_WME) == 0) || + TRANSLOG_PAGE_SIZE, 0, MY_WME) == 0) || translog_init(maria_data_root, TRANSLOG_FILE_SIZE, 0, 0, maria_log_pagecache, TRANSLOG_DEFAULT_FLAGS, 0) || diff --git a/storage/maria/ma_test3.c b/storage/maria/ma_test3.c index 64b22e45c1b..5d57bef8f9e 100644 --- a/storage/maria/ma_test3.c +++ b/storage/maria/ma_test3.c @@ -178,7 +178,7 @@ void start_test(int id) exit(1); } if (pagecacheing && rnd(2) == 0) - init_pagecache(maria_pagecache, 65536L, 0, 0, MARIA_KEY_BLOCK_LENGTH, + init_pagecache(maria_pagecache, 65536L, 0, 0, MARIA_KEY_BLOCK_LENGTH, 0, MY_WME); printf("Process %d, pid: %ld\n",id,(long) getpid()); fflush(stdout); diff --git 
a/storage/maria/maria_chk.c b/storage/maria/maria_chk.c index 9235d5ee96a..2ea647ea1f5 100644 --- a/storage/maria/maria_chk.c +++ b/storage/maria/maria_chk.c @@ -140,7 +140,7 @@ int main(int argc, char **argv) { if (init_pagecache(maria_log_pagecache, TRANSLOG_PAGECACHE_SIZE, 0, 0, - TRANSLOG_PAGE_SIZE, MY_WME) == 0 || + TRANSLOG_PAGE_SIZE, 0, MY_WME) == 0 || translog_init(opt_log_dir, TRANSLOG_FILE_SIZE, 0, 0, maria_log_pagecache, TRANSLOG_DEFAULT_FLAGS, 0)) @@ -1178,7 +1178,7 @@ static int maria_chk(HA_CHECK *param, char *filename) maria_lock_database(info, F_EXTRA_LCK); datafile= info->dfile.file; if (init_pagecache(maria_pagecache, (size_t) param->use_buffers, 0, 0, - maria_block_size, MY_WME) == 0) + maria_block_size, 0, MY_WME) == 0) { _ma_check_print_error(param, "Can't initialize page cache with %lu memory", (ulong) param->use_buffers); diff --git a/storage/maria/maria_def.h b/storage/maria/maria_def.h index ab4ade30c44..b878aaa0f7d 100644 --- a/storage/maria/maria_def.h +++ b/storage/maria/maria_def.h @@ -364,6 +364,7 @@ typedef struct st_maria_share LEX_STRING index_file_name; LEX_STRING open_file_name; /* parameter to open filename */ uchar *file_map; /* mem-map of file if possible */ + LIST *open_list; /* Tables open with this share */ PAGECACHE *pagecache; /* ref to the current key cache */ MARIA_DECODE_TREE *decode_trees; /* @@ -629,6 +630,7 @@ struct st_maria_handler PAGECACHE_FILE dfile; /* The datafile */ IO_CACHE rec_cache; /* When cacheing records */ LIST open_list; + LIST share_list; MY_BITMAP changed_fields; ulong row_base_length; /* Length of row header */ uint row_flag; /* Flag to store in row header */ diff --git a/storage/maria/maria_ftdump.c b/storage/maria/maria_ftdump.c index 68e13a8ddc4..4e34678c8f8 100644 --- a/storage/maria/maria_ftdump.c +++ b/storage/maria/maria_ftdump.c @@ -85,7 +85,7 @@ int main(int argc,char *argv[]) } init_pagecache(maria_pagecache, PAGE_BUFFER_INIT, 0, 0, - MARIA_KEY_BLOCK_LENGTH, MY_WME); + 
MARIA_KEY_BLOCK_LENGTH, 0, MY_WME); if (!(info=maria_open(argv[0], O_RDONLY, HA_OPEN_ABORT_IF_LOCKED|HA_OPEN_FROM_SQL_LAYER))) diff --git a/storage/maria/maria_pack.c b/storage/maria/maria_pack.c index 26d57ade59a..7eca9e14e93 100644 --- a/storage/maria/maria_pack.c +++ b/storage/maria/maria_pack.c @@ -511,7 +511,7 @@ static int compress(PACK_MRG_INFO *mrg,char *result_table) fn_format(org_name,isam_file->s->open_file_name.str, "",MARIA_NAME_DEXT, 2+4+16); if (init_pagecache(maria_pagecache, MARIA_MIN_PAGE_CACHE_SIZE, 0, 0, - maria_block_size, MY_WME) == 0) + maria_block_size, 0, MY_WME) == 0) { fprintf(stderr, "Can't initialize page cache\n"); goto err; diff --git a/storage/maria/maria_read_log.c b/storage/maria/maria_read_log.c index f5b91f9628f..8fa6533bc46 100644 --- a/storage/maria/maria_read_log.c +++ b/storage/maria/maria_read_log.c @@ -70,7 +70,7 @@ int main(int argc, char **argv) goto err; } if (init_pagecache(maria_pagecache, opt_page_buffer_size, 0, 0, - maria_block_size, MY_WME) == 0) + maria_block_size, 0, MY_WME) == 0) { fprintf(stderr, "Got error in init_pagecache() (errno: %d)\n", errno); goto err; @@ -82,7 +82,7 @@ int main(int argc, char **argv) which is useless. TODO: start log handler in read-only mode. 
*/ if (init_pagecache(maria_log_pagecache, opt_translog_buffer_size, - 0, 0, TRANSLOG_PAGE_SIZE, MY_WME) == 0 || + 0, 0, TRANSLOG_PAGE_SIZE, 0, MY_WME) == 0 || translog_init(maria_data_root, TRANSLOG_FILE_SIZE, 0, 0, maria_log_pagecache, TRANSLOG_DEFAULT_FLAGS, opt_display_only)) diff --git a/storage/maria/unittest/ma_pagecache_consist.c b/storage/maria/unittest/ma_pagecache_consist.c index 6a25a47591c..5f0e25b5bf4 100644 --- a/storage/maria/unittest/ma_pagecache_consist.c +++ b/storage/maria/unittest/ma_pagecache_consist.c @@ -431,7 +431,7 @@ int main(int argc __attribute__((unused)), #endif if ((pagen= init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, - TEST_PAGE_SIZE, 0)) == 0) + TEST_PAGE_SIZE, 0, 0)) == 0) { diag("Got error: init_pagecache() (errno: %d)\n", errno); diff --git a/storage/maria/unittest/ma_pagecache_rwconsist.c b/storage/maria/unittest/ma_pagecache_rwconsist.c index 7afdbfd0ac1..1a268db6ad5 100644 --- a/storage/maria/unittest/ma_pagecache_rwconsist.c +++ b/storage/maria/unittest/ma_pagecache_rwconsist.c @@ -301,7 +301,7 @@ int main(int argc __attribute__((unused)), #endif if ((pagen= init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, - TEST_PAGE_SIZE, 0)) == 0) + TEST_PAGE_SIZE, 0, 0)) == 0) { diag("Got error: init_pagecache() (errno: %d)\n", errno); diff --git a/storage/maria/unittest/ma_pagecache_rwconsist2.c b/storage/maria/unittest/ma_pagecache_rwconsist2.c index 917fddd0bcf..751c045a879 100644 --- a/storage/maria/unittest/ma_pagecache_rwconsist2.c +++ b/storage/maria/unittest/ma_pagecache_rwconsist2.c @@ -297,7 +297,7 @@ int main(int argc __attribute__((unused)), #endif if ((pagen= init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, - TEST_PAGE_SIZE, 0)) == 0) + TEST_PAGE_SIZE, 0, 0)) == 0) { diag("Got error: init_pagecache() (errno: %d)\n", errno); diff --git a/storage/maria/unittest/ma_pagecache_single.c b/storage/maria/unittest/ma_pagecache_single.c index 0031582589e..64f6782f20f 100644 --- a/storage/maria/unittest/ma_pagecache_single.c +++ 
b/storage/maria/unittest/ma_pagecache_single.c @@ -828,7 +828,7 @@ int main(int argc __attribute__((unused)), #endif if ((pagen= init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, - TEST_PAGE_SIZE, MYF(MY_WME))) == 0) + TEST_PAGE_SIZE, 0, MYF(MY_WME))) == 0) { fprintf(stderr,"Got error: init_pagecache() (errno: %d)\n", errno); diff --git a/storage/maria/unittest/ma_test_loghandler-t.c b/storage/maria/unittest/ma_test_loghandler-t.c index abf2078ce8f..18650fa400d 100644 --- a/storage/maria/unittest/ma_test_loghandler-t.c +++ b/storage/maria/unittest/ma_test_loghandler-t.c @@ -147,7 +147,6 @@ int main(int argc __attribute__((unused)), char *argv[]) { uint32 i; uint32 rec_len; - uint pagen; uchar long_tr_id[6]; uchar lsn_buff[23]= { @@ -203,8 +202,8 @@ int main(int argc __attribute__((unused)), char *argv[]) fprintf(stderr, "Can't init control file (%d)\n", errno); exit(1); } - if ((pagen= init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, - TRANSLOG_PAGE_SIZE, 0)) == 0) + if (init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, + TRANSLOG_PAGE_SIZE, 0, 0) == 0) { fprintf(stderr, "Got error: init_pagecache() (errno: %d)\n", errno); exit(1); diff --git a/storage/maria/unittest/ma_test_loghandler_first_lsn-t.c b/storage/maria/unittest/ma_test_loghandler_first_lsn-t.c index 9ebd56c754c..cf86b59da45 100644 --- a/storage/maria/unittest/ma_test_loghandler_first_lsn-t.c +++ b/storage/maria/unittest/ma_test_loghandler_first_lsn-t.c @@ -35,7 +35,6 @@ static const char *default_dbug_option; int main(int argc __attribute__((unused)), char *argv[]) { - uint pagen; uchar long_tr_id[6]; PAGECACHE pagecache; LSN lsn, first_lsn, theor_lsn; @@ -72,8 +71,8 @@ int main(int argc __attribute__((unused)), char *argv[]) fprintf(stderr, "Can't init control file (%d)\n", errno); exit(1); } - if ((pagen= init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, - PCACHE_PAGE, 0)) == 0) + if (init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, + PCACHE_PAGE, 0, 0) == 0) { fprintf(stderr, "Got error: init_pagecache() (errno: 
%d)\n", errno); exit(1); diff --git a/storage/maria/unittest/ma_test_loghandler_max_lsn-t.c b/storage/maria/unittest/ma_test_loghandler_max_lsn-t.c index 4ae9def8598..855135451c3 100644 --- a/storage/maria/unittest/ma_test_loghandler_max_lsn-t.c +++ b/storage/maria/unittest/ma_test_loghandler_max_lsn-t.c @@ -36,7 +36,6 @@ static const char *default_dbug_option; int main(int argc __attribute__((unused)), char *argv[]) { ulong i; - uint pagen; uchar long_tr_id[6]; PAGECACHE pagecache; LSN lsn, max_lsn, last_lsn= LSN_IMPOSSIBLE; @@ -70,8 +69,8 @@ int main(int argc __attribute__((unused)), char *argv[]) fprintf(stderr, "Can't init control file (%d)\n", errno); exit(1); } - if ((pagen= init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, - PCACHE_PAGE, 0)) == 0) + if (init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, + PCACHE_PAGE, 0, 0) == 0) { fprintf(stderr, "Got error: init_pagecache() (errno: %d)\n", errno); exit(1); diff --git a/storage/maria/unittest/ma_test_loghandler_multigroup-t.c b/storage/maria/unittest/ma_test_loghandler_multigroup-t.c index c8e63cb26ab..63d1f1c6977 100644 --- a/storage/maria/unittest/ma_test_loghandler_multigroup-t.c +++ b/storage/maria/unittest/ma_test_loghandler_multigroup-t.c @@ -226,7 +226,6 @@ int main(int argc __attribute__((unused)), char *argv[]) { uint32 i; uint32 rec_len; - uint pagen; uchar long_tr_id[6]; uchar lsn_buff[23]= { @@ -284,8 +283,8 @@ int main(int argc __attribute__((unused)), char *argv[]) fprintf(stderr, "Can't init control file (%d)\n", errno); exit(1); } - if ((pagen= init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, - TRANSLOG_PAGE_SIZE, 0)) == 0) + if (init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, + TRANSLOG_PAGE_SIZE, 0, 0) == 0) { fprintf(stderr, "Got error: init_pagecache() (errno: %d)\n", errno); exit(1); @@ -447,8 +446,8 @@ int main(int argc __attribute__((unused)), char *argv[]) fprintf(stderr, "pass2: Can't init control file (%d)\n", errno); exit(1); } - if ((pagen= init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, - 
TRANSLOG_PAGE_SIZE, 0)) == 0) + if (init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, + TRANSLOG_PAGE_SIZE, 0, 0) == 0) { fprintf(stderr, "pass2: Got error: init_pagecache() (errno: %d)\n", errno); exit(1); diff --git a/storage/maria/unittest/ma_test_loghandler_multithread-t.c b/storage/maria/unittest/ma_test_loghandler_multithread-t.c index 18fbaeace5a..535f363048b 100644 --- a/storage/maria/unittest/ma_test_loghandler_multithread-t.c +++ b/storage/maria/unittest/ma_test_loghandler_multithread-t.c @@ -261,7 +261,6 @@ int main(int argc __attribute__((unused)), char **argv __attribute__ ((unused))) { uint32 i; - uint pagen; PAGECACHE pagecache; LSN first_lsn; TRANSLOG_HEADER_BUFFER rec; @@ -341,8 +340,8 @@ int main(int argc __attribute__((unused)), fprintf(stderr, "Can't init control file (%d)\n", errno); exit(1); } - if ((pagen= init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, - TRANSLOG_PAGE_SIZE, 0)) == 0) + if (init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, + TRANSLOG_PAGE_SIZE, 0, 0) == 0) { fprintf(stderr, "Got error: init_pagecache() (errno: %d)\n", errno); exit(1); diff --git a/storage/maria/unittest/ma_test_loghandler_noflush-t.c b/storage/maria/unittest/ma_test_loghandler_noflush-t.c index c8c0f7d1873..8d0af947574 100644 --- a/storage/maria/unittest/ma_test_loghandler_noflush-t.c +++ b/storage/maria/unittest/ma_test_loghandler_noflush-t.c @@ -34,7 +34,6 @@ static const char *default_dbug_option; int main(int argc __attribute__((unused)), char *argv[]) { - uint pagen; int rc= 1; uchar long_tr_id[6]; PAGECACHE pagecache; @@ -71,8 +70,8 @@ int main(int argc __attribute__((unused)), char *argv[]) fprintf(stderr, "Can't init control file (%d)\n", errno); exit(1); } - if ((pagen= init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, - PCACHE_PAGE, 0)) == 0) + if (init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, + PCACHE_PAGE, 0, 0) == 0) { fprintf(stderr, "Got error: init_pagecache() (errno: %d)\n", errno); exit(1); diff --git a/storage/maria/unittest/ma_test_loghandler_nologs-t.c 
b/storage/maria/unittest/ma_test_loghandler_nologs-t.c index 24c93e428e1..5d6db7355c2 100644 --- a/storage/maria/unittest/ma_test_loghandler_nologs-t.c +++ b/storage/maria/unittest/ma_test_loghandler_nologs-t.c @@ -36,7 +36,6 @@ static const char *default_dbug_option; int main(int argc __attribute__((unused)), char *argv[]) { ulong i; - uint pagen; uchar long_tr_id[6]; PAGECACHE pagecache; LSN lsn; @@ -72,8 +71,8 @@ int main(int argc __attribute__((unused)), char *argv[]) fprintf(stderr, "Can't init control file (%d)\n", errno); exit(1); } - if ((pagen= init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, - PCACHE_PAGE, 0)) == 0) + if (init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, + PCACHE_PAGE, 0, 0) == 0) { fprintf(stderr, "Got error: init_pagecache() (errno: %d)\n", errno); exit(1); @@ -145,8 +144,8 @@ int main(int argc __attribute__((unused)), char *argv[]) fprintf(stderr, "Can't init control file (%d)\n", errno); exit(1); } - if ((pagen= init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, - PCACHE_PAGE, 0)) == 0) + if (init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, + PCACHE_PAGE, 0, 0) == 0) { fprintf(stderr, "Got error: init_pagecache() (errno: %d)\n", errno); exit(1); diff --git a/storage/maria/unittest/ma_test_loghandler_pagecache-t.c b/storage/maria/unittest/ma_test_loghandler_pagecache-t.c index a939cef71a7..e634506628a 100644 --- a/storage/maria/unittest/ma_test_loghandler_pagecache-t.c +++ b/storage/maria/unittest/ma_test_loghandler_pagecache-t.c @@ -64,7 +64,6 @@ dummy_fail_callback(uchar* data_ptr __attribute__((unused))) int main(int argc __attribute__((unused)), char *argv[]) { - uint pagen; uchar long_tr_id[6]; PAGECACHE pagecache; LSN lsn; @@ -99,8 +98,8 @@ int main(int argc __attribute__((unused)), char *argv[]) fprintf(stderr, "Can't init control file (%d)\n", errno); exit(1); } - if ((pagen= init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, - PCACHE_PAGE, 0)) == 0) + if (init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, + PCACHE_PAGE, 0, 0) == 0) { fprintf(stderr, 
"Got error: init_pagecache() (errno: %d)\n", errno); exit(1); diff --git a/storage/maria/unittest/ma_test_loghandler_purge-t.c b/storage/maria/unittest/ma_test_loghandler_purge-t.c index 6ae0e7830ae..a13645f1bb8 100644 --- a/storage/maria/unittest/ma_test_loghandler_purge-t.c +++ b/storage/maria/unittest/ma_test_loghandler_purge-t.c @@ -73,7 +73,7 @@ int main(int argc __attribute__((unused)), char *argv[]) exit(1); } if ((pagen= init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, - PCACHE_PAGE, 0)) == 0) + PCACHE_PAGE, 0, 0)) == 0) { fprintf(stderr, "Got error: init_pagecache() (errno: %d)\n", errno); exit(1); diff --git a/storage/myisam/ft_parser.c b/storage/myisam/ft_parser.c index 0e89d7d1b3a..5612b4bec14 100644 --- a/storage/myisam/ft_parser.c +++ b/storage/myisam/ft_parser.c @@ -329,7 +329,7 @@ MYSQL_FTPARSER_PARAM* ftparser_alloc_param(MI_INFO *info) { if (!info->ftparser_param) { - /* + /* . info->ftparser_param can not be zero after the initialization, because it always includes built-in fulltext parser. And built-in parser can be called even if the table has no fulltext indexes and diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc index bc4c5139810..f90e01d8cc9 100644 --- a/storage/myisam/ha_myisam.cc +++ b/storage/myisam/ha_myisam.cc @@ -823,7 +823,15 @@ int ha_myisam::open(const char *name, int mode, uint test_if_locked) table->key_info[i].block_size= file->s->keyinfo[i].block_length; } my_errno= 0; + + /* Count statistics of usage for newly open normal files */ + if (file->s->reopen == 1 && ! 
(test_if_locked & HA_OPEN_TMP_TABLE)) + { + if (file->s->delay_key_write) + feature_files_opened_with_delayed_keys++; + } goto end; + err: this->close(); end: @@ -1080,7 +1088,6 @@ int ha_myisam::repair(THD *thd, HA_CHECK ¶m, bool do_optimize) param.db_name= table->s->db.str; param.table_name= table->alias.c_ptr(); - param.tmpfile_createflag= O_RDWR | O_TRUNC | O_EXCL; param.using_global_keycache = 1; param.thd= thd; param.tmpdir= &mysql_tmpdir_list; diff --git a/storage/myisam/mi_check.c b/storage/myisam/mi_check.c index 3a2bdb2e899..b79d6c891f1 100644 --- a/storage/myisam/mi_check.c +++ b/storage/myisam/mi_check.c @@ -1536,7 +1536,7 @@ int mi_repair(HA_CHECK *param, register MI_INFO *info, if (!param->using_global_keycache) (void) init_key_cache(dflt_key_cache, param->key_cache_block_size, - (size_t) param->use_buffers, 0, 0, 0); + (size_t) param->use_buffers, 0, 0, 0, 0); if (init_io_cache(¶m->read_cache,info->dfile, (uint) param->read_buffer_length, diff --git a/storage/myisam/mi_test1.c b/storage/myisam/mi_test1.c index 87cea2e5566..d90a8549e1e 100644 --- a/storage/myisam/mi_test1.c +++ b/storage/myisam/mi_test1.c @@ -51,7 +51,7 @@ int main(int argc,char *argv[]) my_init(); if (key_cacheing) init_key_cache(dflt_key_cache,KEY_CACHE_BLOCK_SIZE,IO_SIZE*16,0,0, - DEFAULT_KEY_CACHE_PARTITIONS); + 0, DEFAULT_KEY_CACHE_PARTITIONS); get_options(argc,argv); exit(run_test("test1")); diff --git a/storage/myisam/mi_test2.c b/storage/myisam/mi_test2.c index e53c68874b2..be58b3c54d0 100644 --- a/storage/myisam/mi_test2.c +++ b/storage/myisam/mi_test2.c @@ -217,7 +217,7 @@ int main(int argc, char *argv[]) printf("- Writing key:s\n"); if (key_cacheing) init_key_cache(dflt_key_cache,key_cache_block_size,key_cache_size,0,0, - DEFAULT_KEY_CACHE_PARTITIONS); + 0, DEFAULT_KEY_CACHE_PARTITIONS); if (do_locking) mi_lock_database(file,F_WRLCK); if (write_cacheing) @@ -278,8 +278,9 @@ int main(int argc, char *argv[]) } } if (key_cacheing) - 
resize_key_cache(dflt_key_cache,key_cache_block_size,key_cache_size*2,0,0); - + resize_key_cache(dflt_key_cache,key_cache_block_size,key_cache_size*2, + 0, 0, 0); + if (!silent) printf("- Delete\n"); for (i=0 ; i<recant/10 ; i++) diff --git a/storage/myisam/mi_test3.c b/storage/myisam/mi_test3.c index 885118d4eec..e05398f7c4a 100644 --- a/storage/myisam/mi_test3.c +++ b/storage/myisam/mi_test3.c @@ -178,7 +178,7 @@ void start_test(int id) } if (key_cacheing && rnd(2) == 0) init_key_cache(dflt_key_cache, KEY_CACHE_BLOCK_SIZE, 65536L, 0, 0, - DEFAULT_KEY_CACHE_PARTITIONS); + 0, DEFAULT_KEY_CACHE_PARTITIONS); printf("Process %d, pid: %ld\n", id, (long) getpid()); fflush(stdout); diff --git a/storage/myisam/mi_test_all.sh b/storage/myisam/mi_test_all.sh index 12c28d7d132..e6327fd8247 100755 --- a/storage/myisam/mi_test_all.sh +++ b/storage/myisam/mi_test_all.sh @@ -156,9 +156,9 @@ echo "mi_test2$suffix $silent -L -K -R1 -m2000 ; Should give error 135" ./myisamchk$suffix -sm test2 ./mi_test2$suffix $silent -L -K -W -P -m50 -l -./myisamlog$suffix +./myisamlog$suffix -P ./mi_test2$suffix $silent -L -K -W -P -m50 -l -b100 -./myisamlog$suffix +./myisamlog$suffix -P time ./mi_test2$suffix $silent time ./mi_test2$suffix $silent -K -B time ./mi_test2$suffix $silent -L -B diff --git a/storage/myisam/myisam_ftdump.c b/storage/myisam/myisam_ftdump.c index e1ea9f2de37..55ee3795f9b 100644 --- a/storage/myisam/myisam_ftdump.c +++ b/storage/myisam/myisam_ftdump.c @@ -84,7 +84,7 @@ int main(int argc,char *argv[]) usage(); } - init_key_cache(dflt_key_cache, MI_KEY_BLOCK_LENGTH, KEY_BUFFER_INIT, 0, 0, 0); + init_key_cache(dflt_key_cache, MI_KEY_BLOCK_LENGTH, KEY_BUFFER_INIT, 0, 0, 0, 0); if (!(info=mi_open(argv[0], O_RDONLY, HA_OPEN_ABORT_IF_LOCKED|HA_OPEN_FROM_SQL_LAYER))) diff --git a/storage/myisam/myisamchk.c b/storage/myisam/myisamchk.c index 0cd01398cbc..7835ab83531 100644 --- a/storage/myisam/myisamchk.c +++ b/storage/myisam/myisamchk.c @@ -1115,7 +1115,7 @@ static int 
myisamchk(HA_CHECK *param, char * filename) { if (param->testflag & (T_EXTEND | T_MEDIUM)) (void) init_key_cache(dflt_key_cache,opt_key_cache_block_size, - param->use_buffers, 0, 0, 0); + param->use_buffers, 0, 0, 0, 0); (void) init_io_cache(¶m->read_cache,datafile, (uint) param->read_buffer_length, READ_CACHE, @@ -1532,7 +1532,7 @@ static int mi_sort_records(HA_CHECK *param, DBUG_RETURN(0); /* Nothing to do */ init_key_cache(dflt_key_cache, opt_key_cache_block_size, - (size_t) param->use_buffers, 0, 0, 0); + (size_t) param->use_buffers, 0, 0, 0, 0); if (init_io_cache(&info->rec_cache,-1,(uint) param->write_buffer_length, WRITE_CACHE,share->pack.header_length,1, MYF(MY_WME | MY_WAIT_IF_FULL))) diff --git a/storage/myisam/myisamlog.c b/storage/myisam/myisamlog.c index 86e1978edaa..d549dd76037 100644 --- a/storage/myisam/myisamlog.c +++ b/storage/myisam/myisamlog.c @@ -333,7 +333,7 @@ static int examine_log(char * file_name, char **table_names) (tree_element_free) file_info_free, NULL, MYF(MY_TREE_WITH_DELETE)); (void) init_key_cache(dflt_key_cache,KEY_CACHE_BLOCK_SIZE,KEY_CACHE_SIZE, - 0, 0, 0); + 0, 0, 0, 0); files_open=0; access_time=0; while (access_time++ != number_of_commands && diff --git a/storage/oqgraph/mysql-test/oqgraph/regression_mdev5996.result b/storage/oqgraph/mysql-test/oqgraph/regression_mdev5996.result index a639a185ec2..39e476b2403 100644 --- a/storage/oqgraph/mysql-test/oqgraph/regression_mdev5996.result +++ b/storage/oqgraph/mysql-test/oqgraph/regression_mdev5996.result @@ -1,3 +1,4 @@ +call mtr.add_suppression("99999999 is open on delete"); Performing OQGraph regression test mdev5996 - using db=``, table=`999999999` use test; drop table if exists `999999999` ; diff --git a/storage/oqgraph/mysql-test/oqgraph/regression_mdev5996.test b/storage/oqgraph/mysql-test/oqgraph/regression_mdev5996.test index e5d04ef357d..cb4563a5759 100644 --- a/storage/oqgraph/mysql-test/oqgraph/regression_mdev5996.test +++ 
b/storage/oqgraph/mysql-test/oqgraph/regression_mdev5996.test @@ -2,6 +2,8 @@ # MidSchipDB_unstable --let $oqgraph_table_name= 999999999 +call mtr.add_suppression("99999999 is open on delete"); + --let $oqgraph_database_name= --source regression_mdev5996.inc diff --git a/storage/perfschema/ha_perfschema.cc b/storage/perfschema/ha_perfschema.cc index 58db8dc0031..95ac1c85f00 100644 --- a/storage/perfschema/ha_perfschema.cc +++ b/storage/perfschema/ha_perfschema.cc @@ -205,7 +205,7 @@ maria_declare_plugin(perfschema) 0x0001, pfs_status_vars, NULL, - "0.1", + "5.6.20", MariaDB_PLUGIN_MATURITY_STABLE } maria_declare_plugin_end; diff --git a/storage/perfschema/table_events_statements.cc b/storage/perfschema/table_events_statements.cc index 96a79eecd00..a3f2680c3e7 100644 --- a/storage/perfschema/table_events_statements.cc +++ b/storage/perfschema/table_events_statements.cc @@ -249,7 +249,7 @@ table_events_statements_current::m_share= &table_events_statements_current::delete_all_rows, NULL, /* get_row_count */ 1000, /* records */ - sizeof(PFS_simple_index), /* ref length */ + sizeof(pos_events_statements_current), /* ref length */ &m_table_lock, &m_field_def, false /* checked */ diff --git a/storage/sequence/sequence.cc b/storage/sequence/sequence.cc index e1111bb0f7c..57a558465a2 100644 --- a/storage/sequence/sequence.cc +++ b/storage/sequence/sequence.cc @@ -376,7 +376,7 @@ maria_declare_plugin(sequence) NULL, NULL, "0.1", - MariaDB_PLUGIN_MATURITY_GAMMA + MariaDB_PLUGIN_MATURITY_STABLE } maria_declare_plugin_end; diff --git a/storage/tokudb/CMakeLists.in b/storage/tokudb/CMakeLists.in deleted file mode 100644 index 20c05126841..00000000000 --- a/storage/tokudb/CMakeLists.in +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (C) 2006 MySQL AB -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DTOKUDB_VERSION=\\\"TOKUDB_VERSION_REPLACE_ME\\\"") -SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX") -SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX") - -INCLUDE_DIRECTORIES(TOKUDB_DIR_REPLACE_ME/windows - TOKUDB_DIR_REPLACE_ME/src - TOKUDB_DIR_REPLACE_ME/include - TOKUDB_DIR_REPLACE_ME/toku_include) - -INCLUDE("${PROJECT_SOURCE_DIR}/storage/mysql_storage_engine.cmake") -SET(TOKUDB_SOURCES hatoku_hton.cc ha_tokudb.cc hatoku_cmp.cc) -MYSQL_STORAGE_ENGINE(TOKUDB) - -TARGET_LINK_LIBRARIES(ha_tokudb PowrProf optimized TOKUDB_OBJ_DIR_REPLACE_ME/opt/ipo_libtokudb optimized TOKUDB_OBJ_DIR_REPLACE_ME/opt/libtokuportability debug TOKUDB_OBJ_DIR_REPLACE_ME/debug/static_libtokudb debug TOKUDB_OBJ_DIR_REPLACE_ME/debug/libtokuportability) diff --git a/storage/tokudb/CMakeLists.txt b/storage/tokudb/CMakeLists.txt index 6657e1088a1..4bac1003a55 100644 --- a/storage/tokudb/CMakeLists.txt +++ b/storage/tokudb/CMakeLists.txt @@ -14,11 +14,12 @@ IF(NOT TOKUDB_OK OR WITHOUT_TOKUDB OR WITHOUT_TOKUDB_STORAGE_ENGINE) ENDIF() ############################################ -SET(TOKUDB_VERSION "7.1.6") +SET(TOKUDB_VERSION "7.1.7") SET(TOKUDB_DEB_FILES "usr/lib/mysql/plugin/ha_tokudb.so\netc/mysql/conf.d/tokudb.cnf\nusr/bin/tokuftdump\nusr/share/doc/mariadb-galera-server-10.0/README-TOKUDB\nusr/share/doc/mariadb-galera-server-10.0/README.md" PARENT_SCOPE) SET(USE_BDB OFF CACHE BOOL "") SET(USE_VALGRIND OFF CACHE BOOL "") SET(BUILD_TESTING OFF 
CACHE BOOL "") +SET(TOKU_DEBUG_PARANOID OFF CACHE BOOL "") MARK_AS_ADVANCED(BUILDNAME) MARK_AS_ADVANCED(BUILD_TESTING) MARK_AS_ADVANCED(CMAKE_TOKUDB_REVISION) diff --git a/storage/tokudb/README.md b/storage/tokudb/README.md index 7d4ebcefce1..1deb3699c5a 100644 --- a/storage/tokudb/README.md +++ b/storage/tokudb/README.md @@ -24,14 +24,14 @@ working MySQL or MariaDB with Tokutek patches, and with the TokuDB storage engine, called `make.mysql.bash`. This script will download copies of the needed source code from github and build everything. -To build MySQL 5.5.36 with TokuDB 7.1.5: +To build MySQL 5.5.37 with TokuDB 7.1.6: ```sh -scripts/make.mysql.bash --mysqlbuild=mysql-5.5.36-tokudb-7.1.5-linux-x86_64 +scripts/make.mysql.bash --mysqlbuild=mysql-5.5.37-tokudb-7.1.6-linux-x86_64 ``` -To build MariaDB 5.5.36 with TokuDB 7.1.5: +To build MariaDB 5.5.37 with TokuDB 7.1.6: ```sh -scripts/make.mysql.bash --mysqlbuild=mariadb-5.5.36-tokudb-7.1.5-linux-x86_64 +scripts/make.mysql.bash --mysqlbuild=mariadb-5.5.37-tokudb-7.1.6-linux-x86_64 ``` Before you start, make sure you have a C++11-compatible compiler (GCC >= diff --git a/storage/tokudb/ft-index/CMakeLists.txt b/storage/tokudb/ft-index/CMakeLists.txt index 1228da8c35d..f28e7745295 100644 --- a/storage/tokudb/ft-index/CMakeLists.txt +++ b/storage/tokudb/ft-index/CMakeLists.txt @@ -6,6 +6,31 @@ project(TokuDB) set(CMAKE_SHARED_LIBRARY_LINK_C_FLAGS "") set(CMAKE_SHARED_LIBRARY_LINK_CXX_FLAGS "") +## Versions of gcc >= 4.9.0 require special version of 'ar' and 'ranlib' for +## link-time optimizations to work properly. +## +## From https://gcc.gnu.org/gcc-4.9/changes.html: +## +## When using a linker plugin, compiling with the -flto option now +## generates slim objects files (.o) which only contain intermediate +## language representation for LTO. Use -ffat-lto-objects to create +## files which contain additionally the object code. 
To generate +## static libraries suitable for LTO processing, use gcc-ar and +## gcc-ranlib; to list symbols from a slim object file use +## gcc-nm. (Requires that ar, ranlib and nm have been compiled with +## plugin support.) +if ((CMAKE_CXX_COMPILER_ID STREQUAL GNU) AND + NOT (CMAKE_CXX_COMPILER_VERSION VERSION_LESS "4.9.0")) + find_program(gcc_ar "gcc-ar") + if (gcc_ar) + set(CMAKE_AR "${gcc_ar}") + endif () + find_program(gcc_ranlib "gcc-ranlib") + if (gcc_ranlib) + set(CMAKE_RANLIB "${gcc_ranlib}") + endif () +endif() + include(TokuFeatureDetection) include(TokuSetupCompiler) include(TokuSetupCTest) diff --git a/storage/tokudb/ft-index/cmake_modules/TokuThirdParty.cmake b/storage/tokudb/ft-index/cmake_modules/TokuThirdParty.cmake index 461390ffb7c..cb474c385af 100644 --- a/storage/tokudb/ft-index/cmake_modules/TokuThirdParty.cmake +++ b/storage/tokudb/ft-index/cmake_modules/TokuThirdParty.cmake @@ -3,35 +3,34 @@ include(ExternalProject) if (CMAKE_PROJECT_NAME STREQUAL TokuDB) ## add jemalloc with an external project set(JEMALLOC_SOURCE_DIR "${TokuDB_SOURCE_DIR}/third_party/jemalloc" CACHE FILEPATH "Where to find jemalloc sources.") - if (NOT EXISTS "${JEMALLOC_SOURCE_DIR}/configure") - message(FATAL_ERROR "Can't find jemalloc sources. Please check them out to ${JEMALLOC_SOURCE_DIR} or modify JEMALLOC_SOURCE_DIR.") - endif () - set(jemalloc_configure_opts "CC=${CMAKE_C_COMPILER}" "--with-jemalloc-prefix=" "--with-private-namespace=tokudb_jemalloc_internal_" "--enable-cc-silence") - option(JEMALLOC_DEBUG "Build jemalloc with --enable-debug." 
OFF) - if (JEMALLOC_DEBUG) - list(APPEND jemalloc_configure_opts --enable-debug) - endif () - ExternalProject_Add(build_jemalloc - PREFIX jemalloc - SOURCE_DIR "${JEMALLOC_SOURCE_DIR}" - CONFIGURE_COMMAND - "${JEMALLOC_SOURCE_DIR}/configure" ${jemalloc_configure_opts} - "--prefix=${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/jemalloc" - ) + if (EXISTS "${JEMALLOC_SOURCE_DIR}/configure") + set(jemalloc_configure_opts "CC=${CMAKE_C_COMPILER}" "--with-jemalloc-prefix=" "--with-private-namespace=tokudb_jemalloc_internal_" "--enable-cc-silence") + option(JEMALLOC_DEBUG "Build jemalloc with --enable-debug." OFF) + if (JEMALLOC_DEBUG) + list(APPEND jemalloc_configure_opts --enable-debug) + endif () + ExternalProject_Add(build_jemalloc + PREFIX jemalloc + SOURCE_DIR "${JEMALLOC_SOURCE_DIR}" + CONFIGURE_COMMAND + "${JEMALLOC_SOURCE_DIR}/configure" ${jemalloc_configure_opts} + "--prefix=${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/jemalloc" + ) - add_library(jemalloc STATIC IMPORTED GLOBAL) - set_target_properties(jemalloc PROPERTIES IMPORTED_LOCATION - "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/jemalloc/lib/libjemalloc_pic.a") - add_dependencies(jemalloc build_jemalloc) - add_library(jemalloc_nopic STATIC IMPORTED GLOBAL) - set_target_properties(jemalloc_nopic PROPERTIES IMPORTED_LOCATION - "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/jemalloc/lib/libjemalloc.a") - add_dependencies(jemalloc_nopic build_jemalloc) + add_library(jemalloc STATIC IMPORTED GLOBAL) + set_target_properties(jemalloc PROPERTIES IMPORTED_LOCATION + "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/jemalloc/lib/libjemalloc_pic.a") + add_dependencies(jemalloc build_jemalloc) + add_library(jemalloc_nopic STATIC IMPORTED GLOBAL) + set_target_properties(jemalloc_nopic PROPERTIES IMPORTED_LOCATION + "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/jemalloc/lib/libjemalloc.a") + add_dependencies(jemalloc_nopic build_jemalloc) - # detect when we are being built as a subproject - if (NOT DEFINED 
MYSQL_PROJECT_NAME_DOCSTRING) - install(DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/jemalloc/lib" DESTINATION . - COMPONENT tokukv_libs_extra) + # detect when we are being built as a subproject + if (NOT DEFINED MYSQL_PROJECT_NAME_DOCSTRING) + install(DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/jemalloc/lib" DESTINATION . + COMPONENT tokukv_libs_extra) + endif () endif () endif () diff --git a/storage/tokudb/ft-index/ft/checkpoint.cc b/storage/tokudb/ft-index/ft/checkpoint.cc index 3d26c3a460e..bc4629a1d08 100644 --- a/storage/tokudb/ft-index/ft/checkpoint.cc +++ b/storage/tokudb/ft-index/ft/checkpoint.cc @@ -158,8 +158,8 @@ status_init(void) { STATUS_INIT(CP_TIME_LAST_CHECKPOINT_BEGIN, CHECKPOINT_LAST_BEGAN, UNIXTIME, "last checkpoint began ", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); STATUS_INIT(CP_TIME_LAST_CHECKPOINT_BEGIN_COMPLETE, CHECKPOINT_LAST_COMPLETE_BEGAN, UNIXTIME, "last complete checkpoint began ", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); STATUS_INIT(CP_TIME_LAST_CHECKPOINT_END, CHECKPOINT_LAST_COMPLETE_ENDED, UNIXTIME, "last complete checkpoint ended", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); - STATUS_INIT(CP_TIME_CHECKPOINT_DURATION, CHECKPOINT_DURATION, UNIXTIME, "time spent during checkpoint (begin and end phases)", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); - STATUS_INIT(CP_TIME_CHECKPOINT_DURATION_LAST, CHECKPOINT_DURATION_LAST, UNIXTIME, "time spent during last checkpoint (begin and end phases)", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); + STATUS_INIT(CP_TIME_CHECKPOINT_DURATION, CHECKPOINT_DURATION, UINT64, "time spent during checkpoint (begin and end phases)", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); + STATUS_INIT(CP_TIME_CHECKPOINT_DURATION_LAST, CHECKPOINT_DURATION_LAST, UINT64, "time spent during last checkpoint (begin and end phases)", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); STATUS_INIT(CP_LAST_LSN, nullptr, UINT64, "last complete checkpoint LSN", TOKU_ENGINE_STATUS); STATUS_INIT(CP_CHECKPOINT_COUNT, CHECKPOINT_TAKEN, 
UINT64, "checkpoints taken ", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); STATUS_INIT(CP_CHECKPOINT_COUNT_FAIL, CHECKPOINT_FAILED, UINT64, "checkpoints failed", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); @@ -381,8 +381,8 @@ toku_checkpoint(CHECKPOINTER cp, TOKULOGGER logger, STATUS_VALUE(CP_LONG_BEGIN_TIME) += duration; STATUS_VALUE(CP_LONG_BEGIN_COUNT) += 1; } - STATUS_VALUE(CP_TIME_CHECKPOINT_DURATION) += ((time_t) STATUS_VALUE(CP_TIME_LAST_CHECKPOINT_END)) - ((time_t) STATUS_VALUE(CP_TIME_LAST_CHECKPOINT_BEGIN)); - STATUS_VALUE(CP_TIME_CHECKPOINT_DURATION_LAST) = ((time_t) STATUS_VALUE(CP_TIME_LAST_CHECKPOINT_END)) - ((time_t) STATUS_VALUE(CP_TIME_LAST_CHECKPOINT_BEGIN)); + STATUS_VALUE(CP_TIME_CHECKPOINT_DURATION) += (uint64_t) ((time_t) STATUS_VALUE(CP_TIME_LAST_CHECKPOINT_END)) - ((time_t) STATUS_VALUE(CP_TIME_LAST_CHECKPOINT_BEGIN)); + STATUS_VALUE(CP_TIME_CHECKPOINT_DURATION_LAST) = (uint64_t) ((time_t) STATUS_VALUE(CP_TIME_LAST_CHECKPOINT_END)) - ((time_t) STATUS_VALUE(CP_TIME_LAST_CHECKPOINT_BEGIN)); STATUS_VALUE(CP_FOOTPRINT) = 0; checkpoint_safe_checkpoint_unlock(); diff --git a/storage/tokudb/ft-index/ft/ft-cachetable-wrappers.cc b/storage/tokudb/ft-index/ft/ft-cachetable-wrappers.cc index 1f3aa3e0baa..91a0040b02e 100644 --- a/storage/tokudb/ft-index/ft/ft-cachetable-wrappers.cc +++ b/storage/tokudb/ft-index/ft/ft-cachetable-wrappers.cc @@ -403,3 +403,25 @@ toku_unpin_ftnode_read_only(FT ft, FTNODE node) ); assert(r==0); } + +void toku_ftnode_swap_pair_values(FTNODE a, FTNODE b) +// Effect: Swap the blocknum, fullhash, and PAIR for for a and b +// Requires: Both nodes are pinned +{ + BLOCKNUM tmp_blocknum = a->thisnodename; + uint32_t tmp_fullhash = a->fullhash; + PAIR tmp_pair = a->ct_pair; + + a->thisnodename = b->thisnodename; + a->fullhash = b->fullhash; + a->ct_pair = b->ct_pair; + + b->thisnodename = tmp_blocknum; + b->fullhash = tmp_fullhash; + b->ct_pair = tmp_pair; + + // A and B swapped pair pointers, but we still have to swap + // the actual pair 
values (ie: the FTNODEs they represent) + // in the cachetable. + toku_cachetable_swap_pair_values(a->ct_pair, b->ct_pair); +} diff --git a/storage/tokudb/ft-index/ft/ft-cachetable-wrappers.h b/storage/tokudb/ft-index/ft/ft-cachetable-wrappers.h index 9a56f4ff220..dc84d7f006b 100644 --- a/storage/tokudb/ft-index/ft/ft-cachetable-wrappers.h +++ b/storage/tokudb/ft-index/ft/ft-cachetable-wrappers.h @@ -190,4 +190,7 @@ int toku_maybe_pin_ftnode_clean(FT ft, BLOCKNUM blocknum, uint32_t fullhash, pai void toku_unpin_ftnode(FT h, FTNODE node); void toku_unpin_ftnode_read_only(FT ft, FTNODE node); +// Effect: Swaps pair values of two pinned nodes +void toku_ftnode_swap_pair_values(FTNODE nodea, FTNODE nodeb); + #endif diff --git a/storage/tokudb/ft-index/ft/ft-flusher.cc b/storage/tokudb/ft-index/ft/ft-flusher.cc index 0fe556aec0f..dc4096a7993 100644 --- a/storage/tokudb/ft-index/ft/ft-flusher.cc +++ b/storage/tokudb/ft-index/ft/ft-flusher.cc @@ -565,6 +565,7 @@ static bool may_node_be_reactive(FT ft, FTNODE node) */ static void handle_split_of_child( + FT ft, FTNODE node, int childnum, FTNODE childa, @@ -607,8 +608,20 @@ handle_split_of_child( paranoid_invariant(BP_BLOCKNUM(node, childnum).b==childa->thisnodename.b); // use the same child + // We never set the rightmost blocknum to be the root. + // Instead, we wait for the root to split and let promotion initialize the rightmost + // blocknum to be the first non-root leaf node on the right extreme to recieve an insert. + invariant(ft->h->root_blocknum.b != ft->rightmost_blocknum.b); + if (childa->thisnodename.b == ft->rightmost_blocknum.b) { + // The rightmost leaf (a) split into (a) and (b). We want (b) to swap pair values + // with (a), now that it is the new rightmost leaf. This keeps the rightmost blocknum + // constant, the same the way we keep the root blocknum constant. 
+ toku_ftnode_swap_pair_values(childa, childb); + BP_BLOCKNUM(node, childnum) = childa->thisnodename; + } + BP_BLOCKNUM(node, childnum+1) = childb->thisnodename; - BP_WORKDONE(node, childnum+1) = 0; + BP_WORKDONE(node, childnum+1) = 0; BP_STATE(node,childnum+1) = PT_AVAIL; NONLEAF_CHILDINFO new_bnc = toku_create_empty_nl(); @@ -1071,7 +1084,7 @@ ft_split_child( ft_nonleaf_split(h, child, &nodea, &nodeb, &splitk, 2, dep_nodes); } // printf("%s:%d child did split\n", __FILE__, __LINE__); - handle_split_of_child (node, childnum, nodea, nodeb, &splitk); + handle_split_of_child (h, node, childnum, nodea, nodeb, &splitk); // for test call_flusher_thread_callback(flt_flush_during_split); @@ -1489,6 +1502,14 @@ ft_merge_child( &node->childkeys[childnuma+1], (node->n_children-childnumb)*sizeof(node->childkeys[0])); REALLOC_N(node->n_children-1, node->childkeys); + + // Handle a merge of the rightmost leaf node. + if (did_merge && childb->thisnodename.b == h->rightmost_blocknum.b) { + invariant(childb->thisnodename.b != h->h->root_blocknum.b); + toku_ftnode_swap_pair_values(childa, childb); + BP_BLOCKNUM(node, childnuma) = childa->thisnodename; + } + paranoid_invariant(BP_BLOCKNUM(node, childnuma).b == childa->thisnodename.b); childa->dirty = 1; // just to make sure childb->dirty = 1; // just to make sure diff --git a/storage/tokudb/ft-index/ft/ft-internal.h b/storage/tokudb/ft-index/ft/ft-internal.h index 42d27638330..378e8921328 100644 --- a/storage/tokudb/ft-index/ft/ft-internal.h +++ b/storage/tokudb/ft-index/ft/ft-internal.h @@ -123,6 +123,10 @@ enum { FT_DEFAULT_FANOUT = 16 }; enum { FT_DEFAULT_NODE_SIZE = 4 * 1024 * 1024 }; enum { FT_DEFAULT_BASEMENT_NODE_SIZE = 128 * 1024 }; +// We optimize for a sequential insert pattern if 100 consecutive injections +// happen into the rightmost leaf node due to promotion. 
+enum { FT_SEQINSERT_SCORE_THRESHOLD = 100 }; + // // Field in ftnode_fetch_extra that tells the // partial fetch callback what piece of the node @@ -572,6 +576,22 @@ struct ft { // is this ft a blackhole? if so, all messages are dropped. bool blackhole; + + // The blocknum of the rightmost leaf node in the tree. Stays constant through splits + // and merges using pair-swapping (like the root node, see toku_ftnode_swap_pair_values()) + // + // This field only transitions from RESERVED_BLOCKNUM_NULL to non-null, never back. + // We initialize it when promotion inserts into a non-root leaf node on the right extreme. + // We use the blocktable lock to protect the initialize transition, though it's not really + // necessary since all threads should be setting it to the same value. We maintain that invariant + // on first initialization, see ft_set_or_verify_rightmost_blocknum() + BLOCKNUM rightmost_blocknum; + + // sequential access pattern heuristic + // - when promotion pushes a message directly into the rightmost leaf, the score goes up. + // - if the score is high enough, we optimistically attempt to insert directly into the rightmost leaf + // - if our attempt fails because the key was not in range of the rightmost leaf, we reset the score back to 0 + uint32_t seqinsert_score; }; // Allocate a DB struct off the stack and only set its comparison @@ -1037,7 +1057,7 @@ toku_get_node_for_verify( int toku_verify_ftnode (FT_HANDLE ft_h, - MSN rootmsn, MSN parentmsn, bool messages_exist_above, + MSN rootmsn, MSN parentmsn_with_messages, bool messages_exist_above, FTNODE node, int height, const DBT *lesser_pivot, // Everything in the subtree should be > lesser_pivot. (lesser_pivot==NULL if there is no lesser pivot.) const DBT *greatereq_pivot, // Everything in the subtree should be <= lesser_pivot. (lesser_pivot==NULL if there is no lesser pivot.) 
@@ -1186,6 +1206,9 @@ typedef enum { FT_PRO_NUM_DIDNT_WANT_PROMOTE, FT_BASEMENT_DESERIALIZE_FIXED_KEYSIZE, // how many basement nodes were deserialized with a fixed keysize FT_BASEMENT_DESERIALIZE_VARIABLE_KEYSIZE, // how many basement nodes were deserialized with a variable keysize + FT_PRO_RIGHTMOST_LEAF_SHORTCUT_SUCCESS, + FT_PRO_RIGHTMOST_LEAF_SHORTCUT_FAIL_POS, + FT_PRO_RIGHTMOST_LEAF_SHORTCUT_FAIL_REACTIVE, FT_STATUS_NUM_ROWS } ft_status_entry; diff --git a/storage/tokudb/ft-index/ft/ft-ops.cc b/storage/tokudb/ft-index/ft/ft-ops.cc index 64b6b498c9a..f9701ec34b1 100644 --- a/storage/tokudb/ft-index/ft/ft-ops.cc +++ b/storage/tokudb/ft-index/ft/ft-ops.cc @@ -367,6 +367,9 @@ status_init(void) STATUS_INIT(FT_PRO_NUM_DIDNT_WANT_PROMOTE, PROMOTION_STOPPED_AFTER_LOCKING_CHILD, PARCOUNT, "promotion: stopped anyway, after locking the child", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); STATUS_INIT(FT_BASEMENT_DESERIALIZE_FIXED_KEYSIZE, BASEMENT_DESERIALIZATION_FIXED_KEY, PARCOUNT, "basement nodes deserialized with fixed-keysize", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); STATUS_INIT(FT_BASEMENT_DESERIALIZE_VARIABLE_KEYSIZE, BASEMENT_DESERIALIZATION_VARIABLE_KEY, PARCOUNT, "basement nodes deserialized with variable-keysize", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); + STATUS_INIT(FT_PRO_RIGHTMOST_LEAF_SHORTCUT_SUCCESS, nullptr, PARCOUNT, "promotion: succeeded in using the rightmost leaf shortcut", TOKU_ENGINE_STATUS); + STATUS_INIT(FT_PRO_RIGHTMOST_LEAF_SHORTCUT_FAIL_POS, nullptr, PARCOUNT, "promotion: tried the rightmost leaf shorcut but failed (out-of-bounds)", TOKU_ENGINE_STATUS); + STATUS_INIT(FT_PRO_RIGHTMOST_LEAF_SHORTCUT_FAIL_REACTIVE,nullptr, PARCOUNT, "promotion: tried the rightmost leaf shorcut but failed (child reactive)", TOKU_ENGINE_STATUS); ft_status.initialized = true; } @@ -890,6 +893,11 @@ void toku_ftnode_clone_callback( for (int i = 0; i < node->n_children-1; i++) { toku_clone_dbt(&cloned_node->childkeys[i], node->childkeys[i]); } + if (node->height > 0) { + 
// need to move messages here so that we don't serialize stale + // messages to the fresh tree - ft verify code complains otherwise. + toku_move_ftnode_messages_to_stale(ft, node); + } // clone partition ftnode_clone_partitions(node, cloned_node); @@ -932,11 +940,14 @@ void toku_ftnode_flush_callback( int height = ftnode->height; if (write_me) { toku_assert_entire_node_in_memory(ftnode); - if (height == 0) { + if (height > 0 && !is_clone) { + // cloned nodes already had their stale messages moved, see toku_ftnode_clone_callback() + toku_move_ftnode_messages_to_stale(h, ftnode); + } else if (height == 0) { ft_leaf_run_gc(h, ftnode); - } - if (height == 0 && !is_clone) { - ftnode_update_disk_stats(ftnode, h, for_checkpoint); + if (!is_clone) { + ftnode_update_disk_stats(ftnode, h, for_checkpoint); + } } int r = toku_serialize_ftnode_to(fd, ftnode->thisnodename, ftnode, ndd, !is_clone, h, for_checkpoint); assert_zero(r); @@ -1079,9 +1090,10 @@ exit: return; } +static void ft_bnc_move_messages_to_stale(FT ft, NONLEAF_CHILDINFO bnc); + // replace the child buffer with a compressed version of itself. 
-// @return the old child buffer -static NONLEAF_CHILDINFO +static void compress_internal_node_partition(FTNODE node, int i, enum toku_compression_method compression_method) { // if we should evict, compress the @@ -1092,11 +1104,9 @@ compress_internal_node_partition(FTNODE node, int i, enum toku_compression_metho sub_block_init(sb); toku_create_compressed_partition_from_available(node, i, compression_method, sb); - // now set the state to compressed and return the old, available partition - NONLEAF_CHILDINFO bnc = BNC(node, i); + // now set the state to compressed set_BSB(node, i, sb); BP_STATE(node,i) = PT_COMPRESSED; - return bnc; } void toku_evict_bn_from_memory(FTNODE node, int childnum, FT h) { @@ -1149,18 +1159,27 @@ int toku_ftnode_pe_callback(void *ftnode_pv, PAIR_ATTR old_attr, void *write_ext for (int i = 0; i < node->n_children; i++) { if (BP_STATE(node,i) == PT_AVAIL) { if (BP_SHOULD_EVICT(node,i)) { - NONLEAF_CHILDINFO bnc; - if (ft_compress_buffers_before_eviction) { - // When partially evicting, always compress with quicklz - bnc = compress_internal_node_partition( + NONLEAF_CHILDINFO bnc = BNC(node, i); + if (ft_compress_buffers_before_eviction && + // We may not serialize and compress a partition in memory if its + // in memory layout version is different than what's on disk (and + // therefore requires upgrade). + // + // Auto-upgrade code assumes that if a node's layout version read + // from disk is not current, it MUST require upgrade. Breaking + // this rule would cause upgrade code to upgrade this partition + // again after we serialize it as the current version, which is bad. + node->layout_version == node->layout_version_read_from_disk) { + ft_bnc_move_messages_to_stale(ft, bnc); + compress_internal_node_partition( node, i, + // Always compress with quicklz TOKU_QUICKLZ_METHOD ); } else { // We're not compressing buffers before eviction. Simply // detach the buffer and set the child's state to on-disk. 
- bnc = BNC(node, i); set_BNULL(node, i); BP_STATE(node, i) = PT_ON_DISK; } @@ -1626,12 +1645,10 @@ ft_init_new_root(FT ft, FTNODE oldroot, FTNODE *newrootp) BLOCKNUM old_blocknum = oldroot->thisnodename; uint32_t old_fullhash = oldroot->fullhash; - PAIR old_pair = oldroot->ct_pair; int new_height = oldroot->height+1; uint32_t new_fullhash; BLOCKNUM new_blocknum; - PAIR new_pair = NULL; cachetable_put_empty_node_with_dep_nodes( ft, @@ -1641,7 +1658,6 @@ ft_init_new_root(FT ft, FTNODE oldroot, FTNODE *newrootp) &new_fullhash, &newroot ); - new_pair = newroot->ct_pair; assert(newroot); assert(new_height > 0); @@ -1653,22 +1669,18 @@ ft_init_new_root(FT ft, FTNODE oldroot, FTNODE *newrootp) ft->h->layout_version, ft->h->flags ); + newroot->fullhash = new_fullhash; MSN msna = oldroot->max_msn_applied_to_node_on_disk; newroot->max_msn_applied_to_node_on_disk = msna; BP_STATE(newroot,0) = PT_AVAIL; newroot->dirty = 1; - // now do the "switcheroo" - BP_BLOCKNUM(newroot,0) = new_blocknum; - newroot->thisnodename = old_blocknum; - newroot->fullhash = old_fullhash; - newroot->ct_pair = old_pair; - - oldroot->thisnodename = new_blocknum; - oldroot->fullhash = new_fullhash; - oldroot->ct_pair = new_pair; - - toku_cachetable_swap_pair_values(old_pair, new_pair); + // Set the first child to have the new blocknum, + // and then swap newroot with oldroot. The new root + // will inherit the hash/blocknum/pair from oldroot, + // keeping the root blocknum constant. 
+ BP_BLOCKNUM(newroot, 0) = new_blocknum; + toku_ftnode_swap_pair_values(newroot, oldroot); toku_ft_split_child( ft, @@ -2757,6 +2769,16 @@ static void inject_message_in_locked_node( // verify that msn of latest message was captured in root node paranoid_invariant(msg->msn.msn == node->max_msn_applied_to_node_on_disk.msn); + if (node->thisnodename.b == ft->rightmost_blocknum.b) { + if (ft->seqinsert_score < FT_SEQINSERT_SCORE_THRESHOLD) { + // we promoted to the rightmost leaf node and the seqinsert score has not yet saturated. + toku_sync_fetch_and_add(&ft->seqinsert_score, 1); + } + } else if (ft->seqinsert_score != 0) { + // we promoted to something other than the rightmost leaf node and the score should reset + ft->seqinsert_score = 0; + } + // if we call toku_ft_flush_some_child, then that function unpins the root // otherwise, we unpin ourselves if (node->height > 0 && toku_ft_nonleaf_is_gorged(node, ft->h->nodesize)) { @@ -2913,6 +2935,21 @@ static inline bool should_inject_in_node(seqinsert_loc loc, int height, int dept return (height == 0 || (loc == NEITHER_EXTREME && (height <= 1 || depth >= 2))); } +static void ft_set_or_verify_rightmost_blocknum(FT ft, BLOCKNUM b) +// Given: 'b', the _definitive_ and constant rightmost blocknum of 'ft' +{ + if (ft->rightmost_blocknum.b == RESERVED_BLOCKNUM_NULL) { + toku_ft_lock(ft); + if (ft->rightmost_blocknum.b == RESERVED_BLOCKNUM_NULL) { + ft->rightmost_blocknum = b; + } + toku_ft_unlock(ft); + } + // The rightmost blocknum only transitions from RESERVED_BLOCKNUM_NULL to non-null. + // If it's already set, verify that the stored value is consistent with 'b' + invariant(ft->rightmost_blocknum.b == b.b); +} + static void push_something_in_subtree( FT ft, FTNODE subtree_root, @@ -2960,6 +2997,14 @@ static void push_something_in_subtree( default: STATUS_INC(FT_PRO_NUM_INJECT_DEPTH_GT3, 1); break; } + // If the target node is a non-root leaf node on the right extreme, + // set the rightmost blocknum. 
We know there are no messages above us + // because promotion would not chose to inject directly into this leaf + // otherwise. We explicitly skip the root node because then we don't have + // to worry about changing the rightmost blocknum when the root splits. + if (subtree_root->height == 0 && loc == RIGHT_EXTREME && subtree_root->thisnodename.b != ft->h->root_blocknum.b) { + ft_set_or_verify_rightmost_blocknum(ft, subtree_root->thisnodename); + } inject_message_in_locked_node(ft, subtree_root, target_childnum, msg, flow_deltas, gc_info); } else { int r; @@ -3230,7 +3275,260 @@ void toku_ft_root_put_msg( } } -// Effect: Insert the key-val pair into ft. +static int ft_compare_keys(FT ft, const DBT *a, const DBT *b) +// Effect: Compare two keys using the given fractal tree's comparator/descriptor +{ + FAKE_DB(db, &ft->cmp_descriptor); + return ft->compare_fun(&db, a, b); +} + +static LEAFENTRY bn_get_le_and_key(BASEMENTNODE bn, int idx, DBT *key) +// Effect: Gets the i'th leafentry from the given basement node and +// fill its key in *key +// Requires: The i'th leafentry exists. +{ + LEAFENTRY le; + uint32_t le_len; + void *le_key; + int r = bn->data_buffer.fetch_klpair(idx, &le, &le_len, &le_key); + invariant_zero(r); + toku_fill_dbt(key, le_key, le_len); + return le; +} + +static LEAFENTRY ft_leaf_leftmost_le_and_key(FTNODE leaf, DBT *leftmost_key) +// Effect: If a leftmost key exists in the given leaf, toku_fill_dbt() +// the key into *leftmost_key +// Requires: Leaf is fully in memory and pinned for read or write. 
+// Return: leafentry if it exists, nullptr otherwise +{ + for (int i = 0; i < leaf->n_children; i++) { + BASEMENTNODE bn = BLB(leaf, i); + if (bn->data_buffer.num_klpairs() > 0) { + // Get the first (leftmost) leafentry and its key + return bn_get_le_and_key(bn, 0, leftmost_key); + } + } + return nullptr; +} + +static LEAFENTRY ft_leaf_rightmost_le_and_key(FTNODE leaf, DBT *rightmost_key) +// Effect: If a rightmost key exists in the given leaf, toku_fill_dbt() +// the key into *rightmost_key +// Requires: Leaf is fully in memory and pinned for read or write. +// Return: leafentry if it exists, nullptr otherwise +{ + for (int i = leaf->n_children - 1; i >= 0; i--) { + BASEMENTNODE bn = BLB(leaf, i); + size_t num_les = bn->data_buffer.num_klpairs(); + if (num_les > 0) { + // Get the last (rightmost) leafentry and its key + return bn_get_le_and_key(bn, num_les - 1, rightmost_key); + } + } + return nullptr; +} + +static int ft_leaf_get_relative_key_pos(FT ft, FTNODE leaf, const DBT *key, bool *nondeleted_key_found, int *target_childnum) +// Effect: Determines what the relative position of the given key is with +// respect to a leaf node, and if it exists. +// Requires: Leaf is fully in memory and pinned for read or write. +// Requires: target_childnum is non-null +// Return: < 0 if key is less than the leftmost key in the leaf OR the relative position is unknown, for any reason. +// 0 if key is in the bounds [leftmost_key, rightmost_key] for this leaf or the leaf is empty +// > 0 if key is greater than the rightmost key in the leaf +// *nondeleted_key_found is set (if non-null) if the target key was found and is not deleted, unmodified otherwise +// *target_childnum is set to the child that (does or would) contain the key, if calculated, unmodified otherwise +{ + DBT rightmost_key; + LEAFENTRY rightmost_le = ft_leaf_rightmost_le_and_key(leaf, &rightmost_key); + if (rightmost_le == nullptr) { + // If we can't get a rightmost key then the leaf is empty. 
+ // In such a case, we don't have any information about what keys would be in this leaf. + // We have to assume the leaf node that would contain this key is to the left. + return -1; + } + // We have a rightmost leafentry, so it must exist in some child node + invariant(leaf->n_children > 0); + + int relative_pos = 0; + int c = ft_compare_keys(ft, key, &rightmost_key); + if (c > 0) { + relative_pos = 1; + *target_childnum = leaf->n_children - 1; + } else if (c == 0) { + if (nondeleted_key_found != nullptr && !le_latest_is_del(rightmost_le)) { + *nondeleted_key_found = true; + } + relative_pos = 0; + *target_childnum = leaf->n_children - 1; + } else { + // The key is less than the rightmost. It may still be in bounds if it's >= the leftmost. + DBT leftmost_key; + LEAFENTRY leftmost_le = ft_leaf_leftmost_le_and_key(leaf, &leftmost_key); + invariant_notnull(leftmost_le); // Must exist because a rightmost exists + c = ft_compare_keys(ft, key, &leftmost_key); + if (c > 0) { + if (nondeleted_key_found != nullptr) { + // The caller wants to know if a nondeleted key can be found. 
+ LEAFENTRY target_le; + int childnum = toku_ftnode_which_child(leaf, key, &ft->cmp_descriptor, ft->compare_fun); + BASEMENTNODE bn = BLB(leaf, childnum); + struct msg_leafval_heaviside_extra extra = { ft->compare_fun, &ft->cmp_descriptor, key }; + int r = bn->data_buffer.find_zero<decltype(extra), toku_msg_leafval_heaviside>( + extra, + &target_le, + nullptr, nullptr, nullptr + ); + *target_childnum = childnum; + if (r == 0 && !le_latest_is_del(leftmost_le)) { + *nondeleted_key_found = true; + } + } + relative_pos = 0; + } else if (c == 0) { + if (nondeleted_key_found != nullptr && !le_latest_is_del(leftmost_le)) { + *nondeleted_key_found = true; + } + relative_pos = 0; + *target_childnum = 0; + } else { + relative_pos = -1; + } + } + + return relative_pos; +} + +static void ft_insert_directly_into_leaf(FT ft, FTNODE leaf, int target_childnum, DBT *key, DBT *val, + XIDS message_xids, enum ft_msg_type type, txn_gc_info *gc_info); +static int getf_nothing(ITEMLEN, bytevec, ITEMLEN, bytevec, void *, bool); + +static int ft_maybe_insert_into_rightmost_leaf(FT ft, DBT *key, DBT *val, XIDS message_xids, enum ft_msg_type type, + txn_gc_info *gc_info, bool unique) +// Effect: Pins the rightmost leaf node and attempts to do an insert. +// There are three reasons why we may not succeed. +// - The rightmost leaf is too full and needs a split. +// - The key to insert is not within the provable bounds of this leaf node. +// - The key is within bounds, but it already exists. +// Return: 0 if this function did insert, DB_KEYEXIST if a unique key constraint exists and +// some nondeleted leafentry with the same key exists +// < 0 if this function did not insert, for a reason other than DB_KEYEXIST. +// Note: Treat this function as a possible, but not necessary, optimization for insert. +// Rationale: We want O(1) insertions down the rightmost path of the tree. 
+{ + int r = -1; + + uint32_t rightmost_fullhash; + BLOCKNUM rightmost_blocknum = ft->rightmost_blocknum; + FTNODE rightmost_leaf = nullptr; + + // Don't do the optimization if our heurstic suggests that + // insertion pattern is not sequential. + if (ft->seqinsert_score < FT_SEQINSERT_SCORE_THRESHOLD) { + goto cleanup; + } + + // We know the seqinsert score is high enough that we should + // attemp to directly insert into the right most leaf. Because + // the score is non-zero, the rightmost blocknum must have been + // set. See inject_message_in_locked_node(), which only increases + // the score if the target node blocknum == rightmost_blocknum + invariant(rightmost_blocknum.b != RESERVED_BLOCKNUM_NULL); + + // Pin the rightmost leaf with a write lock. + rightmost_fullhash = toku_cachetable_hash(ft->cf, rightmost_blocknum); + struct ftnode_fetch_extra bfe; + fill_bfe_for_full_read(&bfe, ft); + toku_pin_ftnode(ft, rightmost_blocknum, rightmost_fullhash, &bfe, PL_WRITE_CHEAP, &rightmost_leaf, true); + + // The rightmost blocknum never chances once it is initialized to something + // other than null. Verify that the pinned node has the correct blocknum. + invariant(rightmost_leaf->thisnodename.b == rightmost_blocknum.b); + + // If the rightmost leaf is reactive, bail out out and let the normal promotion pass + // take care of it. This also ensures that if any of our ancestors are reactive, + // they'll be taken care of too. + if (get_leaf_reactivity(rightmost_leaf, ft->h->nodesize) != RE_STABLE) { + STATUS_INC(FT_PRO_RIGHTMOST_LEAF_SHORTCUT_FAIL_REACTIVE, 1); + goto cleanup; + } + + // The groundwork has been laid for an insertion directly into the rightmost + // leaf node. We know that it is pinned for write, fully in memory, has + // no messages above it, and is not reactive. + // + // Now, two more things must be true for this insertion to actually happen: + // 1. The key to insert is within the bounds of this leafnode, or to the right. + // 2. 
If there is a uniqueness constraint, it passes. + bool nondeleted_key_found; + int relative_pos; + int target_childnum; + + nondeleted_key_found = false; + target_childnum = -1; + relative_pos = ft_leaf_get_relative_key_pos(ft, rightmost_leaf, key, + unique ? &nondeleted_key_found : nullptr, + &target_childnum); + if (relative_pos >= 0) { + STATUS_INC(FT_PRO_RIGHTMOST_LEAF_SHORTCUT_SUCCESS, 1); + if (unique && nondeleted_key_found) { + r = DB_KEYEXIST; + } else { + ft_insert_directly_into_leaf(ft, rightmost_leaf, target_childnum, + key, val, message_xids, type, gc_info); + r = 0; + } + } else { + STATUS_INC(FT_PRO_RIGHTMOST_LEAF_SHORTCUT_FAIL_POS, 1); + r = -1; + } + +cleanup: + // If we did the insert, the rightmost leaf was unpinned for us. + if (r != 0 && rightmost_leaf != nullptr) { + toku_unpin_ftnode(ft, rightmost_leaf); + } + + return r; +} + +static void ft_txn_log_insert(FT ft, DBT *key, DBT *val, TOKUTXN txn, bool do_logging, enum ft_msg_type type); + +int toku_ft_insert_unique(FT_HANDLE ft_h, DBT *key, DBT *val, TOKUTXN txn, bool do_logging) { +// Effect: Insert a unique key-val pair into the fractal tree. +// Return: 0 on success, DB_KEYEXIST if the overwrite constraint failed + XIDS message_xids = txn != nullptr ? toku_txn_get_xids(txn) : xids_get_root_xids(); + + TXN_MANAGER txn_manager = toku_ft_get_txn_manager(ft_h); + txn_manager_state txn_state_for_gc(txn_manager); + + TXNID oldest_referenced_xid_estimate = toku_ft_get_oldest_referenced_xid_estimate(ft_h); + txn_gc_info gc_info(&txn_state_for_gc, + oldest_referenced_xid_estimate, + // no messages above us, we can implicitly promote uxrs based on this xid + oldest_referenced_xid_estimate, + true); + int r = ft_maybe_insert_into_rightmost_leaf(ft_h->ft, key, val, message_xids, FT_INSERT, &gc_info, true); + if (r != 0 && r != DB_KEYEXIST) { + // Default to a regular unique check + insert algorithm if we couldn't + // do it based on the rightmost leaf alone. 
+ int lookup_r = toku_ft_lookup(ft_h, key, getf_nothing, nullptr); + if (lookup_r == DB_NOTFOUND) { + toku_ft_send_insert(ft_h, key, val, message_xids, FT_INSERT, &gc_info); + r = 0; + } else { + r = DB_KEYEXIST; + } + } + + if (r == 0) { + ft_txn_log_insert(ft_h->ft, key, val, txn, do_logging, FT_INSERT); + } + return r; +} + +// Effect: Insert the key-val pair into an ft. void toku_ft_insert (FT_HANDLE ft_handle, DBT *key, DBT *val, TOKUTXN txn) { toku_ft_maybe_insert(ft_handle, key, val, txn, false, ZERO_LSN, true, FT_INSERT); } @@ -3356,32 +3654,38 @@ TXNID toku_ft_get_oldest_referenced_xid_estimate(FT_HANDLE ft_h) { return txn_manager != nullptr ? toku_txn_manager_get_oldest_referenced_xid_estimate(txn_manager) : TXNID_NONE; } -void toku_ft_maybe_insert (FT_HANDLE ft_h, DBT *key, DBT *val, TOKUTXN txn, bool oplsn_valid, LSN oplsn, bool do_logging, enum ft_msg_type type) { - paranoid_invariant(type==FT_INSERT || type==FT_INSERT_NO_OVERWRITE); - XIDS message_xids = xids_get_root_xids(); //By default use committed messages +static void ft_txn_log_insert(FT ft, DBT *key, DBT *val, TOKUTXN txn, bool do_logging, enum ft_msg_type type) { + paranoid_invariant(type == FT_INSERT || type == FT_INSERT_NO_OVERWRITE); + + //By default use committed messages TXNID_PAIR xid = toku_txn_get_txnid(txn); if (txn) { BYTESTRING keybs = {key->size, (char *) key->data}; - toku_logger_save_rollback_cmdinsert(txn, toku_cachefile_filenum(ft_h->ft->cf), &keybs); - toku_txn_maybe_note_ft(txn, ft_h->ft); - message_xids = toku_txn_get_xids(txn); + toku_logger_save_rollback_cmdinsert(txn, toku_cachefile_filenum(ft->cf), &keybs); + toku_txn_maybe_note_ft(txn, ft); } TOKULOGGER logger = toku_txn_logger(txn); if (do_logging && logger) { BYTESTRING keybs = {.len=key->size, .data=(char *) key->data}; BYTESTRING valbs = {.len=val->size, .data=(char *) val->data}; if (type == FT_INSERT) { - toku_log_enq_insert(logger, (LSN*)0, 0, txn, toku_cachefile_filenum(ft_h->ft->cf), xid, keybs, valbs); + 
toku_log_enq_insert(logger, (LSN*)0, 0, txn, toku_cachefile_filenum(ft->cf), xid, keybs, valbs); } else { - toku_log_enq_insert_no_overwrite(logger, (LSN*)0, 0, txn, toku_cachefile_filenum(ft_h->ft->cf), xid, keybs, valbs); + toku_log_enq_insert_no_overwrite(logger, (LSN*)0, 0, txn, toku_cachefile_filenum(ft->cf), xid, keybs, valbs); } } +} + +void toku_ft_maybe_insert (FT_HANDLE ft_h, DBT *key, DBT *val, TOKUTXN txn, bool oplsn_valid, LSN oplsn, bool do_logging, enum ft_msg_type type) { + ft_txn_log_insert(ft_h->ft, key, val, txn, do_logging, type); LSN treelsn; if (oplsn_valid && oplsn.lsn <= (treelsn = toku_ft_checkpoint_lsn(ft_h->ft)).lsn) { // do nothing } else { + XIDS message_xids = txn ? toku_txn_get_xids(txn) : xids_get_root_xids(); + TXN_MANAGER txn_manager = toku_ft_get_txn_manager(ft_h); txn_manager_state txn_state_for_gc(txn_manager); @@ -3391,10 +3695,26 @@ void toku_ft_maybe_insert (FT_HANDLE ft_h, DBT *key, DBT *val, TOKUTXN txn, bool // no messages above us, we can implicitly promote uxrs based on this xid oldest_referenced_xid_estimate, txn != nullptr ? !txn->for_recovery : false); - toku_ft_send_insert(ft_h, key, val, message_xids, type, &gc_info); + int r = ft_maybe_insert_into_rightmost_leaf(ft_h->ft, key, val, message_xids, FT_INSERT, &gc_info, false); + if (r != 0) { + toku_ft_send_insert(ft_h, key, val, message_xids, type, &gc_info); + } } } +static void ft_insert_directly_into_leaf(FT ft, FTNODE leaf, int target_childnum, DBT *key, DBT *val, + XIDS message_xids, enum ft_msg_type type, txn_gc_info *gc_info) +// Effect: Insert directly into a leaf node a fractal tree. Does not do any logging. +// Requires: Leaf is fully in memory and pinned for write. +// Requires: If this insertion were to happen through the root node, the promotion +// algorithm would have selected the given leaf node as the point of injection. +// That means this function relies on the current implementation of promotion. 
+{ + FT_MSG_S ftcmd = { type, ZERO_MSN, message_xids, .u = { .id = { key, val } } }; + size_t flow_deltas[] = { 0, 0 }; + inject_message_in_locked_node(ft, leaf, target_childnum, &ftcmd, flow_deltas, gc_info); +} + static void ft_send_update_msg(FT_HANDLE ft_h, FT_MSG_S *msg, TOKUTXN txn) { msg->xids = (txn @@ -4894,6 +5214,13 @@ int copy_to_stale(const int32_t &offset, const uint32_t UU(idx), struct copy_to_ return 0; } +static void ft_bnc_move_messages_to_stale(FT ft, NONLEAF_CHILDINFO bnc) { + struct copy_to_stale_extra cts_extra = { .ft = ft, .bnc = bnc }; + int r = bnc->fresh_message_tree.iterate_over_marked<struct copy_to_stale_extra, copy_to_stale>(&cts_extra); + invariant_zero(r); + bnc->fresh_message_tree.delete_all_marked(); +} + __attribute__((nonnull)) void toku_move_ftnode_messages_to_stale(FT ft, FTNODE node) { @@ -4906,10 +5233,7 @@ toku_move_ftnode_messages_to_stale(FT ft, FTNODE node) { // We can't delete things out of the fresh tree inside the above // procedures because we're still looking at the fresh tree. Instead // we have to move messages after we're done looking at it. 
- struct copy_to_stale_extra cts_extra = { .ft = ft, .bnc = bnc }; - int r = bnc->fresh_message_tree.iterate_over_marked<struct copy_to_stale_extra, copy_to_stale>(&cts_extra); - invariant_zero(r); - bnc->fresh_message_tree.delete_all_marked(); + ft_bnc_move_messages_to_stale(ft, bnc); } } diff --git a/storage/tokudb/ft-index/ft/ft-ops.h b/storage/tokudb/ft-index/ft/ft-ops.h index b482d2b8206..cfa6ba20f6f 100644 --- a/storage/tokudb/ft-index/ft/ft-ops.h +++ b/storage/tokudb/ft-index/ft/ft-ops.h @@ -213,6 +213,9 @@ int toku_ft_lookup (FT_HANDLE ft_h, DBT *k, FT_GET_CALLBACK_FUNCTION getf, void // Effect: Insert a key and data pair into an ft void toku_ft_insert (FT_HANDLE ft_h, DBT *k, DBT *v, TOKUTXN txn); +// Returns: 0 if the key was inserted, DB_KEYEXIST if the key already exists +int toku_ft_insert_unique(FT_HANDLE ft, DBT *k, DBT *v, TOKUTXN txn, bool do_logging); + // Effect: Optimize the ft void toku_ft_optimize (FT_HANDLE ft_h); diff --git a/storage/tokudb/ft-index/ft/ft-serialize.cc b/storage/tokudb/ft-index/ft/ft-serialize.cc index 4a4817e7f6c..1879561f20a 100644 --- a/storage/tokudb/ft-index/ft/ft-serialize.cc +++ b/storage/tokudb/ft-index/ft/ft-serialize.cc @@ -462,6 +462,7 @@ serialize_ft_min_size (uint32_t version) { size_t size = 0; switch(version) { + case FT_LAYOUT_VERSION_27: case FT_LAYOUT_VERSION_26: case FT_LAYOUT_VERSION_25: case FT_LAYOUT_VERSION_24: diff --git a/storage/tokudb/ft-index/ft/ft-verify.cc b/storage/tokudb/ft-index/ft/ft-verify.cc index 506a54a07a0..7e8d241cce2 100644 --- a/storage/tokudb/ft-index/ft/ft-verify.cc +++ b/storage/tokudb/ft-index/ft/ft-verify.cc @@ -310,7 +310,7 @@ toku_get_node_for_verify( static int toku_verify_ftnode_internal(FT_HANDLE ft_handle, - MSN rootmsn, MSN parentmsn, bool messages_exist_above, + MSN rootmsn, MSN parentmsn_with_messages, bool messages_exist_above, FTNODE node, int height, const DBT *lesser_pivot, // Everything in the subtree should be > lesser_pivot. 
(lesser_pivot==NULL if there is no lesser pivot.) const DBT *greatereq_pivot, // Everything in the subtree should be <= lesser_pivot. (lesser_pivot==NULL if there is no lesser pivot.) @@ -328,7 +328,7 @@ toku_verify_ftnode_internal(FT_HANDLE ft_handle, invariant(height == node->height); // this is a bad failure if wrong } if (node->height > 0 && messages_exist_above) { - VERIFY_ASSERTION((parentmsn.msn >= this_msn.msn), 0, "node msn must be descending down tree, newest messages at top"); + VERIFY_ASSERTION((parentmsn_with_messages.msn >= this_msn.msn), 0, "node msn must be descending down tree, newest messages at top"); } // Verify that all the pivot keys are in order. for (int i = 0; i < node->n_children-2; i++) { @@ -450,7 +450,7 @@ done: // input is a pinned node, on exit, node is unpinned int toku_verify_ftnode (FT_HANDLE ft_handle, - MSN rootmsn, MSN parentmsn, bool messages_exist_above, + MSN rootmsn, MSN parentmsn_with_messages, bool messages_exist_above, FTNODE node, int height, const DBT *lesser_pivot, // Everything in the subtree should be > lesser_pivot. (lesser_pivot==NULL if there is no lesser pivot.) const DBT *greatereq_pivot, // Everything in the subtree should be <= lesser_pivot. (lesser_pivot==NULL if there is no lesser pivot.) 
@@ -469,7 +469,7 @@ toku_verify_ftnode (FT_HANDLE ft_handle, // Otherwise we'll just do the next call result = toku_verify_ftnode_internal( - ft_handle, rootmsn, parentmsn, messages_exist_above, node, height, lesser_pivot, greatereq_pivot, + ft_handle, rootmsn, parentmsn_with_messages, messages_exist_above, node, height, lesser_pivot, greatereq_pivot, verbose, keep_going_on_failure, false); if (result != 0 && (!keep_going_on_failure || result != TOKUDB_NEEDS_REPAIR)) goto done; } @@ -477,7 +477,7 @@ toku_verify_ftnode (FT_HANDLE ft_handle, toku_move_ftnode_messages_to_stale(ft_handle->ft, node); } result2 = toku_verify_ftnode_internal( - ft_handle, rootmsn, parentmsn, messages_exist_above, node, height, lesser_pivot, greatereq_pivot, + ft_handle, rootmsn, parentmsn_with_messages, messages_exist_above, node, height, lesser_pivot, greatereq_pivot, verbose, keep_going_on_failure, true); if (result == 0) { result = result2; @@ -489,12 +489,16 @@ toku_verify_ftnode (FT_HANDLE ft_handle, for (int i = 0; i < node->n_children; i++) { FTNODE child_node; toku_get_node_for_verify(BP_BLOCKNUM(node, i), ft_handle, &child_node); - int r = toku_verify_ftnode(ft_handle, rootmsn, this_msn, messages_exist_above || toku_bnc_n_entries(BNC(node, i)) > 0, - child_node, node->height-1, - (i==0) ? lesser_pivot : &node->childkeys[i-1], - (i==node->n_children-1) ? greatereq_pivot : &node->childkeys[i], - progress_callback, progress_extra, - recurse, verbose, keep_going_on_failure); + int r = toku_verify_ftnode(ft_handle, rootmsn, + (toku_bnc_n_entries(BNC(node, i)) > 0 + ? this_msn + : parentmsn_with_messages), + messages_exist_above || toku_bnc_n_entries(BNC(node, i)) > 0, + child_node, node->height-1, + (i==0) ? lesser_pivot : &node->childkeys[i-1], + (i==node->n_children-1) ? 
greatereq_pivot : &node->childkeys[i], + progress_callback, progress_extra, + recurse, verbose, keep_going_on_failure); if (r) { result = r; if (!keep_going_on_failure || result != TOKUDB_NEEDS_REPAIR) goto done; diff --git a/storage/tokudb/ft-index/ft/ft_layout_version.h b/storage/tokudb/ft-index/ft/ft_layout_version.h index e9c6a68328b..01c7363e98d 100644 --- a/storage/tokudb/ft-index/ft/ft_layout_version.h +++ b/storage/tokudb/ft-index/ft/ft_layout_version.h @@ -120,6 +120,7 @@ enum ft_layout_version_e { FT_LAYOUT_VERSION_24 = 24, // Riddler: change logentries that log transactions to store TXNID_PAIRs instead of TXNIDs FT_LAYOUT_VERSION_25 = 25, // SecretSquirrel: ROLLBACK_LOG_NODES (on disk and in memory) now just use blocknum (instead of blocknum + hash) to point to other log nodes. same for xstillopen log entry FT_LAYOUT_VERSION_26 = 26, // Hojo: basements store key/vals separately on disk for fixed klpair length BNs + FT_LAYOUT_VERSION_27 = 27, // serialize message trees with nonleaf buffers to avoid key, msn sort on deserialize FT_NEXT_VERSION, // the version after the current version FT_LAYOUT_VERSION = FT_NEXT_VERSION-1, // A hack so I don't have to change this line. 
FT_LAYOUT_MIN_SUPPORTED_VERSION = FT_LAYOUT_VERSION_13, // Minimum version supported diff --git a/storage/tokudb/ft-index/ft/ft_node-serialize.cc b/storage/tokudb/ft-index/ft/ft_node-serialize.cc index fcb38f11834..91ea0890c30 100644 --- a/storage/tokudb/ft-index/ft/ft_node-serialize.cc +++ b/storage/tokudb/ft-index/ft/ft_node-serialize.cc @@ -291,8 +291,13 @@ serialize_ftnode_partition_size (FTNODE node, int i) paranoid_invariant(node->bp[i].state == PT_AVAIL); result++; // Byte that states what the partition is if (node->height > 0) { - result += 4; // size of bytes in buffer table - result += toku_bnc_nbytesinbuf(BNC(node, i)); + NONLEAF_CHILDINFO bnc = BNC(node, i); + // number of messages (4 bytes) plus size of the buffer + result += (4 + toku_bnc_nbytesinbuf(bnc)); + // number of offsets (4 bytes) plus an array of 4 byte offsets, for each message tree + result += (4 + (4 * bnc->fresh_message_tree.size())); + result += (4 + (4 * bnc->stale_message_tree.size())); + result += (4 + (4 * bnc->broadcast_list.size())); } else { result += 4 + bn_data::HEADER_LENGTH; // n_entries in buffer table + basement header @@ -305,8 +310,35 @@ serialize_ftnode_partition_size (FTNODE node, int i) #define FTNODE_PARTITION_DMT_LEAVES 0xaa #define FTNODE_PARTITION_FIFO_MSG 0xbb +UU() static int +assert_fresh(const int32_t &offset, const uint32_t UU(idx), struct fifo *const f) { + struct fifo_entry *entry = toku_fifo_get_entry(f, offset); + assert(entry->is_fresh); + return 0; +} + +UU() static int +assert_stale(const int32_t &offset, const uint32_t UU(idx), struct fifo *const f) { + struct fifo_entry *entry = toku_fifo_get_entry(f, offset); + assert(!entry->is_fresh); + return 0; +} + +static void bnc_verify_message_trees(NONLEAF_CHILDINFO UU(bnc)) { +#ifdef TOKU_DEBUG_PARANOID + bnc->fresh_message_tree.iterate<struct fifo, assert_fresh>(bnc->buffer); + bnc->stale_message_tree.iterate<struct fifo, assert_stale>(bnc->buffer); +#endif +} + +static int +wbuf_write_offset(const int32_t 
&offset, const uint32_t UU(idx), struct wbuf *const wb) { + wbuf_nocrc_int(wb, offset); + return 0; +} + static void -serialize_nonleaf_childinfo(NONLEAF_CHILDINFO bnc, struct wbuf *wb) +serialize_child_buffer(NONLEAF_CHILDINFO bnc, struct wbuf *wb) { unsigned char ch = FTNODE_PARTITION_FIFO_MSG; wbuf_nocrc_char(wb, ch); @@ -323,6 +355,19 @@ serialize_nonleaf_childinfo(NONLEAF_CHILDINFO bnc, struct wbuf *wb) wbuf_nocrc_bytes(wb, key, keylen); wbuf_nocrc_bytes(wb, data, datalen); }); + + bnc_verify_message_trees(bnc); + + // serialize the message trees (num entries, offsets array): + // fresh, stale, broadcast + wbuf_nocrc_int(wb, bnc->fresh_message_tree.size()); + bnc->fresh_message_tree.iterate<struct wbuf, wbuf_write_offset>(wb); + + wbuf_nocrc_int(wb, bnc->stale_message_tree.size()); + bnc->stale_message_tree.iterate<struct wbuf, wbuf_write_offset>(wb); + + wbuf_nocrc_int(wb, bnc->broadcast_list.size()); + bnc->broadcast_list.iterate<struct wbuf, wbuf_write_offset>(wb); } // @@ -346,7 +391,7 @@ serialize_ftnode_partition(FTNODE node, int i, struct sub_block *sb) { wbuf_init(&wb, sb->uncompressed_ptr, sb->uncompressed_size); if (node->height > 0) { // TODO: (Zardosht) possibly exit early if there are no messages - serialize_nonleaf_childinfo(BNC(node, i), &wb); + serialize_child_buffer(BNC(node, i), &wb); } else { unsigned char ch = FTNODE_PARTITION_DMT_LEAVES; @@ -1024,8 +1069,8 @@ toku_serialize_ftnode_to (int fd, BLOCKNUM blocknum, FTNODE node, FTNODE_DISK_DA } static void -deserialize_child_buffer(NONLEAF_CHILDINFO bnc, struct rbuf *rbuf, - DESCRIPTOR desc, ft_compare_func cmp) { +deserialize_child_buffer_v26(NONLEAF_CHILDINFO bnc, struct rbuf *rbuf, + DESCRIPTOR desc, ft_compare_func cmp) { int r; int n_in_this_buffer = rbuf_int(rbuf); int32_t *fresh_offsets = NULL, *stale_offsets = NULL; @@ -1090,6 +1135,68 @@ deserialize_child_buffer(NONLEAF_CHILDINFO bnc, struct rbuf *rbuf, } } +// effect: deserialize a single message from rbuf and enqueue the result into 
the given fifo +static void +fifo_deserialize_msg_from_rbuf(FIFO fifo, struct rbuf *rbuf) { + bytevec key, val; + ITEMLEN keylen, vallen; + enum ft_msg_type type = (enum ft_msg_type) rbuf_char(rbuf); + bool is_fresh = rbuf_char(rbuf); + MSN msn = rbuf_msn(rbuf); + XIDS xids; + xids_create_from_buffer(rbuf, &xids); + rbuf_bytes(rbuf, &key, &keylen); /* Returns a pointer into the rbuf. */ + rbuf_bytes(rbuf, &val, &vallen); + int r = toku_fifo_enq(fifo, key, keylen, val, vallen, type, msn, xids, is_fresh, nullptr); + lazy_assert_zero(r); + xids_destroy(&xids); +} + +static void +deserialize_child_buffer(NONLEAF_CHILDINFO bnc, struct rbuf *rbuf) { + int n_in_this_buffer = rbuf_int(rbuf); + int nfresh = 0, nstale = 0, nbroadcast_offsets = 0; + int32_t *XMALLOC_N(n_in_this_buffer, stale_offsets); + int32_t *XMALLOC_N(n_in_this_buffer, fresh_offsets); + int32_t *XMALLOC_N(n_in_this_buffer, broadcast_offsets); + + toku_fifo_resize(bnc->buffer, rbuf->size + 64); + for (int i = 0; i < n_in_this_buffer; i++) { + fifo_deserialize_msg_from_rbuf(bnc->buffer, rbuf); + } + + // read in each message tree (fresh, stale, broadcast) + nfresh = rbuf_int(rbuf); + bytevec fresh_offsets_src_v; + rbuf_literal_bytes(rbuf, &fresh_offsets_src_v, nfresh * (sizeof *fresh_offsets)); + const int32_t *fresh_offsets_src = (const int32_t *) fresh_offsets_src_v; + for (int i = 0; i < nfresh; i++) { + fresh_offsets[i] = toku_dtoh32(fresh_offsets_src[i]); + } + nstale = rbuf_int(rbuf); + bytevec stale_offsets_src_v; + rbuf_literal_bytes(rbuf, &stale_offsets_src_v, nstale * (sizeof *stale_offsets)); + const int32_t *stale_offsets_src = (const int32_t *) stale_offsets_src_v; + for (int i = 0; i < nstale; i++) { + stale_offsets[i] = toku_dtoh32(stale_offsets_src[i]); + } + nbroadcast_offsets = rbuf_int(rbuf); + bytevec broadcast_offsets_src_v; + rbuf_literal_bytes(rbuf, &broadcast_offsets_src_v, nbroadcast_offsets * (sizeof *broadcast_offsets)); + const int32_t *broadcast_offsets_src = (const int32_t *) 
broadcast_offsets_src_v; + for (int i = 0; i < nbroadcast_offsets; i++) { + broadcast_offsets[i] = toku_dtoh32(broadcast_offsets_src[i]); + } + + // build OMTs out of each offset array + bnc->fresh_message_tree.destroy(); + bnc->fresh_message_tree.create_steal_sorted_array(&fresh_offsets, nfresh, n_in_this_buffer); + bnc->stale_message_tree.destroy(); + bnc->stale_message_tree.create_steal_sorted_array(&stale_offsets, nstale, n_in_this_buffer); + bnc->broadcast_list.destroy(); + bnc->broadcast_list.create_steal_sorted_array(&broadcast_offsets, nbroadcast_offsets, n_in_this_buffer); +} + // dump a buffer to stderr // no locking around this for now void @@ -1161,13 +1268,16 @@ NONLEAF_CHILDINFO toku_create_empty_nl(void) { return cn; } -// does NOT create OMTs, just the FIFO +// must clone the OMTs, since we serialize them along with the FIFO NONLEAF_CHILDINFO toku_clone_nl(NONLEAF_CHILDINFO orig_childinfo) { NONLEAF_CHILDINFO XMALLOC(cn); toku_fifo_clone(orig_childinfo->buffer, &cn->buffer); cn->fresh_message_tree.create_no_array(); + cn->fresh_message_tree.clone(orig_childinfo->fresh_message_tree); cn->stale_message_tree.create_no_array(); + cn->stale_message_tree.clone(orig_childinfo->stale_message_tree); cn->broadcast_list.create_no_array(); + cn->broadcast_list.clone(orig_childinfo->broadcast_list); memset(cn->flow, 0, sizeof cn->flow); return cn; } @@ -1513,7 +1623,13 @@ deserialize_ftnode_partition( if (node->height > 0) { assert(ch == FTNODE_PARTITION_FIFO_MSG); - deserialize_child_buffer(BNC(node, childnum), &rb, desc, cmp); + NONLEAF_CHILDINFO bnc = BNC(node, childnum); + if (node->layout_version_read_from_disk <= FT_LAYOUT_VERSION_26) { + // Layout version <= 26 did not serialize sorted message trees to disk. 
+ deserialize_child_buffer_v26(bnc, &rb, desc, cmp); + } else { + deserialize_child_buffer(bnc, &rb); + } BP_WORKDONE(node, childnum) = 0; } else { diff --git a/storage/tokudb/ft-index/ft/ftloader-internal.h b/storage/tokudb/ft-index/ft/ftloader-internal.h index be1ded59890..d60537490dd 100644 --- a/storage/tokudb/ft-index/ft/ftloader-internal.h +++ b/storage/tokudb/ft-index/ft/ftloader-internal.h @@ -245,6 +245,7 @@ struct ft_loader_s { CACHETABLE cachetable; bool did_reserve_memory; bool compress_intermediates; + bool allow_puts; uint64_t reserved_memory; // how much memory are we allowed to use? /* To make it easier to recover from errors, we don't use FILE*, instead we use an index into the file_infos. */ @@ -346,7 +347,8 @@ int toku_ft_loader_internal_init (/* out */ FTLOADER *blp, TOKUTXN txn, bool reserve_memory, uint64_t reserve_memory_size, - bool compress_intermediates); + bool compress_intermediates, + bool allow_puts); void toku_ft_loader_internal_destroy (FTLOADER bl, bool is_error); diff --git a/storage/tokudb/ft-index/ft/ftloader.cc b/storage/tokudb/ft-index/ft/ftloader.cc index 2df6d0a1cda..67b3cf9905e 100644 --- a/storage/tokudb/ft-index/ft/ftloader.cc +++ b/storage/tokudb/ft-index/ft/ftloader.cc @@ -356,6 +356,8 @@ int ft_loader_open_temp_file (FTLOADER bl, FIDX *file_idx) */ { int result = 0; + if (result) // debug hack + return result; FILE *f = NULL; int fd = -1; char *fname = toku_strdup(bl->temp_file_template); @@ -420,6 +422,10 @@ void toku_ft_loader_internal_destroy (FTLOADER bl, bool is_error) { } destroy_rowset(&bl->primary_rowset); + if (bl->primary_rowset_queue) { + queue_destroy(bl->primary_rowset_queue); + bl->primary_rowset_queue = nullptr; + } for (int i=0; i<bl->N; i++) { if ( bl->fractal_queues ) { @@ -543,7 +549,8 @@ int toku_ft_loader_internal_init (/* out */ FTLOADER *blp, TOKUTXN txn, bool reserve_memory, uint64_t reserve_memory_size, - bool compress_intermediates) + bool compress_intermediates, + bool allow_puts) // Effect: 
Allocate and initialize a FTLOADER, but do not create the extractor thread. { FTLOADER CALLOC(bl); // initialized to all zeros (hence CALLOC) @@ -560,10 +567,7 @@ int toku_ft_loader_internal_init (/* out */ FTLOADER *blp, bl->reserved_memory = 512*1024*1024; // if no cache table use 512MB. } bl->compress_intermediates = compress_intermediates; - if (0) { // debug - fprintf(stderr, "%s Reserved memory=%" PRId64 "\n", __FUNCTION__, bl->reserved_memory); - } - + bl->allow_puts = allow_puts; bl->src_db = src_db; bl->N = N; bl->load_lsn = load_lsn; @@ -628,7 +632,6 @@ int toku_ft_loader_internal_init (/* out */ FTLOADER *blp, { int r = queue_create(&bl->primary_rowset_queue, EXTRACTOR_QUEUE_DEPTH); if (r!=0) { toku_ft_loader_internal_destroy(bl, true); return r; } } - //printf("%s:%d toku_pthread_create\n", __FILE__, __LINE__); { ft_loader_lock_init(bl); } @@ -650,34 +653,38 @@ int toku_ft_loader_open (/* out */ FTLOADER *blp, TOKUTXN txn, bool reserve_memory, uint64_t reserve_memory_size, - bool compress_intermediates) -/* Effect: called by DB_ENV->create_loader to create an ft loader. - * Arguments: - * blp Return the ft loader here. - * g The function for generating a row - * src_db The source database. Needed by g. May be NULL if that's ok with g. - * N The number of dbs to create. - * dbs An array of open databases. Used by g. The data will be put in these database. - * new_fnames The file names (these strings are owned by the caller: we make a copy for our own purposes). - * temp_file_template A template suitable for mkstemp() - * Return value: 0 on success, an error number otherwise. - */ -{ + bool compress_intermediates, + bool allow_puts) { +// Effect: called by DB_ENV->create_loader to create a brt loader. +// Arguments: +// blp Return the brt loader here. +// g The function for generating a row +// src_db The source database. Needed by g. May be NULL if that's ok with g. +// N The number of dbs to create. +// dbs An array of open databases. Used by g. 
The data will be put in these database. +// new_fnames The file names (these strings are owned by the caller: we make a copy for our own purposes). +// temp_file_template A template suitable for mkstemp() +// reserve_memory Cause the loader to reserve memory for its use from the cache table. +// compress_intermediates Cause the loader to compress intermediate loader files. +// allow_puts Prepare the loader for rows to insert. When puts are disabled, the loader does not run the +// extractor or the fractal tree writer threads. +// Return value: 0 on success, an error number otherwise. int result = 0; { int r = toku_ft_loader_internal_init(blp, cachetable, g, src_db, - N, fts, dbs, - new_fnames_in_env, - bt_compare_functions, - temp_file_template, - load_lsn, - txn, - reserve_memory, - reserve_memory_size, - compress_intermediates); + N, fts, dbs, + new_fnames_in_env, + bt_compare_functions, + temp_file_template, + load_lsn, + txn, + reserve_memory, + reserve_memory_size, + compress_intermediates, + allow_puts); if (r!=0) result = r; } - if (result==0) { + if (result==0 && allow_puts) { FTLOADER bl = *blp; int r = toku_pthread_create(&bl->extractor_thread, NULL, extractor_thread, (void*)bl); if (r==0) { @@ -1213,6 +1220,7 @@ finish_extractor (FTLOADER bl) { { int r = queue_destroy(bl->primary_rowset_queue); invariant(r==0); + bl->primary_rowset_queue = nullptr; } rval = ft_loader_fi_close_all(&bl->file_infos); @@ -1374,10 +1382,9 @@ int toku_ft_loader_put (FTLOADER bl, DBT *key, DBT *val) * Return value: 0 on success, an error number otherwise. 
*/ { - if (ft_loader_get_error(&bl->error_callback)) + if (!bl->allow_puts || ft_loader_get_error(&bl->error_callback)) return EINVAL; // previous panic bl->n_rows++; -// return loader_write_row(key, val, bl->fprimary_rows, &bl->fprimary_offset, bl); return loader_do_put(bl, key, val); } @@ -2425,6 +2432,8 @@ static int toku_loader_write_ft_from_q (FTLOADER bl, if (r) { result = r; drain_writer_q(q); + r = toku_os_close(fd); + assert_zero(r); return result; } FILE *pivots_stream = toku_bl_fidx2file(bl, pivots_file); @@ -2714,12 +2723,7 @@ static int loader_do_i (FTLOADER bl, struct rowset *rows = &(bl->rows[which_db]); invariant(rows->data==NULL); // the rows should be all cleaned up already - // a better allocation would be to figure out roughly how many merge passes we'll need. - int allocation_for_merge = (2*progress_allocation)/3; - progress_allocation -= allocation_for_merge; - - int r; - r = queue_create(&bl->fractal_queues[which_db], FRACTAL_WRITER_QUEUE_DEPTH); + int r = queue_create(&bl->fractal_queues[which_db], FRACTAL_WRITER_QUEUE_DEPTH); if (r) goto error; { @@ -2740,49 +2744,62 @@ static int loader_do_i (FTLOADER bl, r = dest_db->get_fanout(dest_db, &target_fanout); invariant_zero(r); - // This structure must stay live until the join below. 
- struct fractal_thread_args fta = { bl, - descriptor, - fd, - progress_allocation, - bl->fractal_queues[which_db], - bl->extracted_datasizes[which_db], - 0, - which_db, - target_nodesize, - target_basementnodesize, - target_compression_method, - target_fanout - }; - - r = toku_pthread_create(bl->fractal_threads+which_db, NULL, fractal_thread, (void*)&fta); - if (r) { - int r2 __attribute__((__unused__)) = queue_destroy(bl->fractal_queues[which_db]); - // ignore r2, since we already have an error - goto error; - } - invariant(bl->fractal_threads_live[which_db]==false); - bl->fractal_threads_live[which_db] = true; + if (bl->allow_puts) { + // a better allocation would be to figure out roughly how many merge passes we'll need. + int allocation_for_merge = (2*progress_allocation)/3; + progress_allocation -= allocation_for_merge; + + // This structure must stay live until the join below. + struct fractal_thread_args fta = { + bl, + descriptor, + fd, + progress_allocation, + bl->fractal_queues[which_db], + bl->extracted_datasizes[which_db], + 0, + which_db, + target_nodesize, + target_basementnodesize, + target_compression_method, + target_fanout + }; + + r = toku_pthread_create(bl->fractal_threads+which_db, NULL, fractal_thread, (void*)&fta); + if (r) { + int r2 __attribute__((__unused__)) = queue_destroy(bl->fractal_queues[which_db]); + // ignore r2, since we already have an error + bl->fractal_queues[which_db] = nullptr; + goto error; + } + invariant(bl->fractal_threads_live[which_db]==false); + bl->fractal_threads_live[which_db] = true; - r = merge_files(fs, bl, which_db, dest_db, compare, allocation_for_merge, bl->fractal_queues[which_db]); + r = merge_files(fs, bl, which_db, dest_db, compare, allocation_for_merge, bl->fractal_queues[which_db]); - { - void *toku_pthread_retval; - int r2 = toku_pthread_join(bl->fractal_threads[which_db], &toku_pthread_retval); - invariant(fta.bl==bl); // this is a gratuitous assertion to make sure that the fta struct is still live 
here. A previous bug but that struct into a C block statement. - resource_assert_zero(r2); - invariant(toku_pthread_retval==NULL); - invariant(bl->fractal_threads_live[which_db]); - bl->fractal_threads_live[which_db] = false; - if (r == 0) r = fta.errno_result; + { + void *toku_pthread_retval; + int r2 = toku_pthread_join(bl->fractal_threads[which_db], &toku_pthread_retval); + invariant(fta.bl==bl); // this is a gratuitous assertion to make sure that the fta struct is still live here. A previous bug put that struct into a C block statement. + resource_assert_zero(r2); + invariant(toku_pthread_retval==NULL); + invariant(bl->fractal_threads_live[which_db]); + bl->fractal_threads_live[which_db] = false; + if (r == 0) r = fta.errno_result; + } + } else { + queue_eof(bl->fractal_queues[which_db]); + r = toku_loader_write_ft_from_q(bl, descriptor, fd, progress_allocation, + bl->fractal_queues[which_db], bl->extracted_datasizes[which_db], which_db, + target_nodesize, target_basementnodesize, target_compression_method, target_fanout); } } error: // this is the cleanup code. Even if r==0 (no error) we fall through to here. 
- { + if (bl->fractal_queues[which_db]) { int r2 = queue_destroy(bl->fractal_queues[which_db]); invariant(r2==0); - bl->fractal_queues[which_db]=NULL; + bl->fractal_queues[which_db] = nullptr; } // if we get here we need to free up the merge_fileset and the rowset, as well as the keys @@ -2851,6 +2868,10 @@ int toku_ft_loader_close (FTLOADER bl, if (r) result = r; invariant(!bl->extractor_live); + } else { + r = finish_primary_rows(bl); + if (r) + result = r; } // check for an error during extraction diff --git a/storage/tokudb/ft-index/ft/ftloader.h b/storage/tokudb/ft-index/ft/ftloader.h index c3376c90e91..c920b4c5362 100644 --- a/storage/tokudb/ft-index/ft/ftloader.h +++ b/storage/tokudb/ft-index/ft/ftloader.h @@ -113,7 +113,8 @@ int toku_ft_loader_open (FTLOADER *bl, TOKUTXN txn, bool reserve_memory, uint64_t reserve_memory_size, - bool compress_intermediates); + bool compress_intermediates, + bool allow_puts); int toku_ft_loader_put (FTLOADER bl, DBT *key, DBT *val); diff --git a/storage/tokudb/ft-index/ft/log_upgrade.cc b/storage/tokudb/ft-index/ft/log_upgrade.cc index e5a36a88cff..8dba57e9d8d 100644 --- a/storage/tokudb/ft-index/ft/log_upgrade.cc +++ b/storage/tokudb/ft-index/ft/log_upgrade.cc @@ -321,8 +321,8 @@ toku_maybe_upgrade_log(const char *env_dir, const char *log_dir, LSN * lsn_of_cl r = 0; //Logs are up to date else { FOOTPRINT(4); - LSN last_lsn; - TXNID last_xid; + LSN last_lsn = ZERO_LSN; + TXNID last_xid = TXNID_NONE; r = verify_clean_shutdown_of_log_version(log_dir, version_of_logs_on_disk, &last_lsn, &last_xid); if (r != 0) { goto cleanup; diff --git a/storage/tokudb/ft-index/ft/logger.cc b/storage/tokudb/ft-index/ft/logger.cc index e4fd854c637..bbac5cf7de3 100644 --- a/storage/tokudb/ft-index/ft/logger.cc +++ b/storage/tokudb/ft-index/ft/logger.cc @@ -621,7 +621,7 @@ int toku_logger_find_next_unused_log_file(const char *directory, long long *resu if (d==0) return get_error_errno(); while ((de=readdir(d))) { if (de==0) return 
get_error_errno(); - long long thisl; + long long thisl = -1; if ( is_a_logfile(de->d_name, &thisl) ) { if ((long long)thisl > maxf) maxf = thisl; } diff --git a/storage/tokudb/ft-index/ft/tests/ftloader-test-bad-generate.cc b/storage/tokudb/ft-index/ft/tests/ftloader-test-bad-generate.cc index 1ecae89da78..9ae24f7c4ec 100644 --- a/storage/tokudb/ft-index/ft/tests/ftloader-test-bad-generate.cc +++ b/storage/tokudb/ft-index/ft/tests/ftloader-test-bad-generate.cc @@ -170,7 +170,7 @@ static void test_extractor(int nrows, int nrowsets, bool expect_fail) { } FTLOADER loader; - r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, fts, dbs, fnames, compares, "tempXXXXXX", ZERO_LSN, nullptr, true, 0, false); + r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, fts, dbs, fnames, compares, "tempXXXXXX", ZERO_LSN, nullptr, true, 0, false, true); assert(r == 0); struct rowset *rowset[nrowsets]; diff --git a/storage/tokudb/ft-index/ft/tests/ftloader-test-extractor-errors.cc b/storage/tokudb/ft-index/ft/tests/ftloader-test-extractor-errors.cc index 4dcd7fb2f8c..007fd39fe08 100644 --- a/storage/tokudb/ft-index/ft/tests/ftloader-test-extractor-errors.cc +++ b/storage/tokudb/ft-index/ft/tests/ftloader-test-extractor-errors.cc @@ -180,7 +180,7 @@ static void test_extractor(int nrows, int nrowsets, bool expect_fail, const char sprintf(temp, "%s/%s", testdir, "tempXXXXXX"); FTLOADER loader; - r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, fts, dbs, fnames, compares, "tempXXXXXX", ZERO_LSN, nullptr, true, 0, false); + r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, fts, dbs, fnames, compares, "tempXXXXXX", ZERO_LSN, nullptr, true, 0, false, true); assert(r == 0); struct rowset *rowset[nrowsets]; diff --git a/storage/tokudb/ft-index/ft/tests/ftloader-test-extractor.cc b/storage/tokudb/ft-index/ft/tests/ftloader-test-extractor.cc index 0a8ce157269..afba44a7a22 100644 --- a/storage/tokudb/ft-index/ft/tests/ftloader-test-extractor.cc +++ 
b/storage/tokudb/ft-index/ft/tests/ftloader-test-extractor.cc @@ -402,7 +402,7 @@ static void test_extractor(int nrows, int nrowsets, const char *testdir) { sprintf(temp, "%s/%s", testdir, "tempXXXXXX"); FTLOADER loader; - r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, fts, dbs, fnames, compares, temp, ZERO_LSN, nullptr, true, 0, false); + r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, fts, dbs, fnames, compares, temp, ZERO_LSN, nullptr, true, 0, false, true); assert(r == 0); struct rowset *rowset[nrowsets]; diff --git a/storage/tokudb/ft-index/ft/tests/ftloader-test-merge-files-dbufio.cc b/storage/tokudb/ft-index/ft/tests/ftloader-test-merge-files-dbufio.cc index 82583595470..cdd4c1d6691 100644 --- a/storage/tokudb/ft-index/ft/tests/ftloader-test-merge-files-dbufio.cc +++ b/storage/tokudb/ft-index/ft/tests/ftloader-test-merge-files-dbufio.cc @@ -412,7 +412,7 @@ static void test (const char *directory, bool is_error) { bt_compare_functions, "tempxxxxxx", *lsnp, - nullptr, true, 0, false); + nullptr, true, 0, false, true); assert(r==0); } @@ -500,11 +500,6 @@ static void test (const char *directory, bool is_error) { assert(cthunk.n_read == N_RECORDS); } } - //printf("%s:%d Destroying\n", __FILE__, __LINE__); - { - int r = queue_destroy(bl->primary_rowset_queue); - assert(r==0); - } { int r = queue_destroy(q); assert(r==0); diff --git a/storage/tokudb/ft-index/ft/tests/ftloader-test-open.cc b/storage/tokudb/ft-index/ft/tests/ftloader-test-open.cc index f2919f04d3d..cdf0a14ab00 100644 --- a/storage/tokudb/ft-index/ft/tests/ftloader-test-open.cc +++ b/storage/tokudb/ft-index/ft/tests/ftloader-test-open.cc @@ -143,7 +143,7 @@ static void test_loader_open(int ndbs) { for (i = 0; ; i++) { set_my_malloc_trigger(i+1); - r = toku_ft_loader_open(&loader, NULL, NULL, NULL, ndbs, fts, dbs, fnames, compares, "", ZERO_LSN, nullptr, true, 0, false); + r = toku_ft_loader_open(&loader, NULL, NULL, NULL, ndbs, fts, dbs, fnames, compares, "", ZERO_LSN, nullptr, 
true, 0, false, true); if (r == 0) break; } diff --git a/storage/tokudb/ft-index/ft/tests/test_rightmost_leaf_seqinsert_heuristic.cc b/storage/tokudb/ft-index/ft/tests/test_rightmost_leaf_seqinsert_heuristic.cc new file mode 100644 index 00000000000..100e5153636 --- /dev/null +++ b/storage/tokudb/ft-index/ft/tests/test_rightmost_leaf_seqinsert_heuristic.cc @@ -0,0 +1,183 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +#ident "$Id$" +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuDB, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2014 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. 
+ +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. 
If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#ident "Copyright (c) 2014 Tokutek Inc. All rights reserved." + +#include "test.h" + +#include <ft/ybt.h> +#include <ft/ft-cachetable-wrappers.h> + +// Each FT maintains a sequential insert heuristic to determine if its +// worth trying to insert directly into a well-known rightmost leaf node. +// +// The heuristic is only maintained when a rightmost leaf node is known. +// +// This test verifies that sequential inserts increase the seqinsert score +// and that a single non-sequential insert resets the score. + +static void test_seqinsert_heuristic(void) { + int r = 0; + char name[TOKU_PATH_MAX + 1]; + toku_path_join(name, 2, TOKU_TEST_FILENAME, "ftdata"); + toku_os_recursive_delete(TOKU_TEST_FILENAME); + r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); CKERR(r); + + FT_HANDLE ft_handle; + CACHETABLE ct; + toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + r = toku_open_ft_handle(name, 1, &ft_handle, + 4*1024*1024, 64*1024, + TOKU_DEFAULT_COMPRESSION_METHOD, ct, NULL, + toku_builtin_compare_fun); CKERR(r); + FT ft = ft_handle->ft; + + int k; + DBT key, val; + const int val_size = 1024 * 1024; + char *XMALLOC_N(val_size, val_buf); + memset(val_buf, 'x', val_size); + toku_fill_dbt(&val, val_buf, val_size); + + // Insert many rows sequentially. 
This is enough data to: + // - force the root to split (the righmost leaf will then be known) + // - raise the seqinsert score high enough to enable direct rightmost injections + const int rows_to_insert = 200; + for (int i = 0; i < rows_to_insert; i++) { + k = toku_htonl(i); + toku_fill_dbt(&key, &k, sizeof(k)); + toku_ft_insert(ft_handle, &key, &val, NULL); + } + invariant(ft->rightmost_blocknum.b != RESERVED_BLOCKNUM_NULL); + invariant(ft->seqinsert_score == FT_SEQINSERT_SCORE_THRESHOLD); + + // Insert on the left extreme. The seq insert score is high enough + // that we will attempt to insert into the rightmost leaf. We won't + // be successful because key 0 won't be in the bounds of the rightmost leaf. + // This failure should reset the seqinsert score back to 0. + k = toku_htonl(0); + toku_fill_dbt(&key, &k, sizeof(k)); + toku_ft_insert(ft_handle, &key, &val, NULL); + invariant(ft->seqinsert_score == 0); + + // Insert in the middle. The score should not go up. + k = toku_htonl(rows_to_insert / 2); + toku_fill_dbt(&key, &k, sizeof(k)); + toku_ft_insert(ft_handle, &key, &val, NULL); + invariant(ft->seqinsert_score == 0); + + // Insert on the right extreme. The score should go up. + k = toku_htonl(rows_to_insert); + toku_fill_dbt(&key, &k, sizeof(k)); + toku_ft_insert(ft_handle, &key, &val, NULL); + invariant(ft->seqinsert_score == 1); + + // Insert again on the right extreme again, the score should go up. + k = toku_htonl(rows_to_insert + 1); + toku_fill_dbt(&key, &k, sizeof(k)); + toku_ft_insert(ft_handle, &key, &val, NULL); + invariant(ft->seqinsert_score == 2); + + // Insert close to, but not at, the right extreme. The score should reset. 
+ // -- the magic number 4 derives from the fact that vals are 1mb and nodes are 4mb + k = toku_htonl(rows_to_insert - 4); + toku_fill_dbt(&key, &k, sizeof(k)); + toku_ft_insert(ft_handle, &key, &val, NULL); + invariant(ft->seqinsert_score == 0); + + toku_free(val_buf); + toku_ft_handle_close(ft_handle); + toku_cachetable_close(&ct); + toku_os_recursive_delete(TOKU_TEST_FILENAME); +} + +int test_main(int argc, const char *argv[]) { + default_parse_args(argc, argv); + test_seqinsert_heuristic(); + return 0; +} diff --git a/storage/tokudb/ft-index/ft/tests/test_rightmost_leaf_split_merge.cc b/storage/tokudb/ft-index/ft/tests/test_rightmost_leaf_split_merge.cc new file mode 100644 index 00000000000..517fc277fd3 --- /dev/null +++ b/storage/tokudb/ft-index/ft/tests/test_rightmost_leaf_split_merge.cc @@ -0,0 +1,212 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +#ident "$Id$" +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuDB, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2014 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. 
+ + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#ident "Copyright (c) 2014 Tokutek Inc. All rights reserved." + +#include "test.h" + +#include <ft/ybt.h> +#include <ft/ft-cachetable-wrappers.h> + +// Promotion tracks the rightmost blocknum in the FT when a message +// is successfully promoted to a non-root leaf node on the right extreme. +// +// This test verifies that a split or merge of the rightmost leaf properly +// maintains the rightmost blocknum (which is constant - the pair's swap values, +// like the root blocknum). 
+ +static void test_split_merge(void) { + int r = 0; + char name[TOKU_PATH_MAX + 1]; + toku_path_join(name, 2, TOKU_TEST_FILENAME, "ftdata"); + toku_os_recursive_delete(TOKU_TEST_FILENAME); + r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); CKERR(r); + + FT_HANDLE ft_handle; + CACHETABLE ct; + toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + r = toku_open_ft_handle(name, 1, &ft_handle, + 4*1024*1024, 64*1024, + TOKU_DEFAULT_COMPRESSION_METHOD, ct, NULL, + toku_builtin_compare_fun); CKERR(r); + + // We have a root blocknum, but no rightmost blocknum yet. + FT ft = ft_handle->ft; + invariant(ft->h->root_blocknum.b != RESERVED_BLOCKNUM_NULL); + invariant(ft->rightmost_blocknum.b == RESERVED_BLOCKNUM_NULL); + + int k; + DBT key, val; + const int val_size = 1 * 1024 * 1024; + char *XMALLOC_N(val_size, val_buf); + memset(val_buf, 'x', val_size); + toku_fill_dbt(&val, val_buf, val_size); + + // Insert 16 rows (should induce a few splits) + const int rows_to_insert = 16; + for (int i = 0; i < rows_to_insert; i++) { + k = toku_htonl(i); + toku_fill_dbt(&key, &k, sizeof(k)); + toku_ft_insert(ft_handle, &key, &val, NULL); + } + + // rightmost blocknum should be set, because the root split and promotion + // did a rightmost insertion directly into the rightmost leaf, lazily + // initializing the rightmost blocknum. 
+ invariant(ft->rightmost_blocknum.b != RESERVED_BLOCKNUM_NULL); + + BLOCKNUM root_blocknum = ft->h->root_blocknum; + FTNODE root_node; + struct ftnode_fetch_extra bfe; + fill_bfe_for_full_read(&bfe, ft); + toku_pin_ftnode(ft, root_blocknum, + toku_cachetable_hash(ft->cf, ft->h->root_blocknum), + &bfe, PL_WRITE_EXPENSIVE, &root_node, true); + // root blocknum should be consistent + invariant(root_node->thisnodename.b == ft->h->root_blocknum.b); + // root should have split at least once, and it should now be at height 1 + invariant(root_node->n_children > 1); + invariant(root_node->height == 1); + // rightmost blocknum should no longer be the root, since the root split + invariant(ft->h->root_blocknum.b != ft->rightmost_blocknum.b); + // the right child should have the rightmost blocknum + invariant(BP_BLOCKNUM(root_node, root_node->n_children - 1).b == ft->rightmost_blocknum.b); + + BLOCKNUM rightmost_blocknum_before_merge = ft->rightmost_blocknum; + const int num_children_before_merge = root_node->n_children; + + // delete the last 6 rows. 
+ // - 1mb each, so 6mb deleted + // - should be enough to delete the entire rightmost leaf + some of its neighbor + const int rows_to_delete = 6; + toku_unpin_ftnode(ft, root_node); + for (int i = 0; i < rows_to_delete; i++) { + k = toku_htonl(rows_to_insert - i); + toku_fill_dbt(&key, &k, sizeof(k)); + toku_ft_delete(ft_handle, &key, NULL); + } + toku_pin_ftnode(ft, root_blocknum, + toku_cachetable_hash(ft->cf, root_blocknum), + &bfe, PL_WRITE_EXPENSIVE, &root_node, true); + + // - rightmost leaf should be fusible after those deletes (which were promoted directly to the leaf) + FTNODE rightmost_leaf; + toku_pin_ftnode(ft, rightmost_blocknum_before_merge, + toku_cachetable_hash(ft->cf, rightmost_blocknum_before_merge), + &bfe, PL_WRITE_EXPENSIVE, &rightmost_leaf, true); + invariant(get_node_reactivity(ft, rightmost_leaf) == RE_FUSIBLE); + toku_unpin_ftnode(ft, rightmost_leaf); + + // - merge the rightmost child now that it's fusible + toku_ft_merge_child(ft, root_node, root_node->n_children - 1); + toku_pin_ftnode(ft, root_blocknum, + toku_cachetable_hash(ft->cf, root_blocknum), + &bfe, PL_WRITE_EXPENSIVE, &root_node, true); + + // the merge should have worked, and the root should still be at height 1 + invariant(root_node->n_children < num_children_before_merge); + invariant(root_node->height == 1); + // the rightmost child of the root has the rightmost blocknum + invariant(BP_BLOCKNUM(root_node, root_node->n_children - 1).b == ft->rightmost_blocknum.b); + // the value for rightmost blocknum itself should not have changed + // (we keep it constant, like the root blocknum) + invariant(rightmost_blocknum_before_merge.b == ft->rightmost_blocknum.b); + + toku_unpin_ftnode(ft, root_node); + + toku_free(val_buf); + toku_ft_handle_close(ft_handle); + toku_cachetable_close(&ct); + toku_os_recursive_delete(TOKU_TEST_FILENAME); +} + +int test_main(int argc, const char *argv[]) { + default_parse_args(argc, argv); + test_split_merge(); + return 0; +} diff --git 
a/storage/tokudb/ft-index/ft/tokuftdump.cc b/storage/tokudb/ft-index/ft/tokuftdump.cc index f2d4fce83cb..a7d94f41d78 100644 --- a/storage/tokudb/ft-index/ft/tokuftdump.cc +++ b/storage/tokudb/ft-index/ft/tokuftdump.cc @@ -89,7 +89,7 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -/* Tell me the diff between two FT files. */ +// Dump a fractal tree file #include "cachetable.h" #include "ft.h" @@ -102,20 +102,26 @@ PATENT RIGHTS GRANT: #include <inttypes.h> #include <limits.h> -static void -format_time(const uint64_t time_int, char *buf) { +static int do_dump_data = 1; +static int do_interactive = 0; +static int do_header = 0; +static int do_fragmentation = 0; +static int do_garbage = 0; +static int do_translation_table = 0; +static int do_rootnode = 0; +static int do_tsv = 0; + +static const char *arg0; +static const char *fname; + +static void format_time(const uint64_t time_int, char *buf) { time_t timer = (time_t) time_int; ctime_r(&timer, buf); assert(buf[24] == '\n'); buf[24] = 0; } -static int dump_data = 1; - -static CACHETABLE ct; - -static void -print_item (bytevec val, ITEMLEN len) { +static void print_item(bytevec val, ITEMLEN len) { printf("\""); ITEMLEN i; for (i=0; i<len; i++) { @@ -129,16 +135,14 @@ print_item (bytevec val, ITEMLEN len) { printf("\""); } -static void -simple_hex_dump(unsigned char *vp, uint64_t size) { +static void simple_hex_dump(unsigned char *vp, uint64_t size) { for (uint64_t i = 0; i < size; i++) { unsigned char c = vp[i]; printf("%2.2X", c); } } -static void -hex_dump(unsigned char *vp, uint64_t offset, uint64_t size) { +static void hex_dump(unsigned char *vp, uint64_t offset, 
uint64_t size) { uint64_t n = size / 32; for (uint64_t i = 0; i < n; i++) { printf("%" PRIu64 ": ", offset); @@ -169,25 +173,26 @@ hex_dump(unsigned char *vp, uint64_t offset, uint64_t size) { printf("\n"); } -static void -dump_descriptor(DESCRIPTOR d) { +static void dump_descriptor(DESCRIPTOR d) { printf(" descriptor size %u ", d->dbt.size); simple_hex_dump((unsigned char*) d->dbt.data, d->dbt.size); printf("\n"); } -static void -open_header (int f, FT *header, CACHEFILE cf) { +static void open_header(int fd, FT *header, CACHEFILE cf) { FT ft = NULL; int r; - r = toku_deserialize_ft_from (f, MAX_LSN, &ft); - assert(r==0); + r = toku_deserialize_ft_from (fd, MAX_LSN, &ft); + if (r != 0) { + fprintf(stderr, "%s: can not deserialize from %s error %d\n", arg0, fname, r); + exit(1); + } + assert_zero(r); ft->cf = cf; *header = ft; } -static void -dump_header(FT ft) { +static void dump_header(FT ft) { char timestr[26]; printf("ft:\n"); printf(" layout_version=%d\n", ft->h->layout_version); @@ -212,29 +217,19 @@ dump_header(FT ft) { printf(" estimated numbytes=%" PRId64 "\n", ft->in_memory_stats.numbytes); } -static int -print_le( - const void* key, - const uint32_t keylen, - const LEAFENTRY &le, - const uint32_t idx UU(), - void *const ai UU() - ) -{ +static int print_le(const void* key, const uint32_t keylen, const LEAFENTRY &le, const uint32_t idx UU(), void *const ai UU()) { print_klpair(stdout, key, keylen, le); printf("\n"); return 0; } - -static void -dump_node (int f, BLOCKNUM blocknum, FT h) { +static void dump_node(int fd, BLOCKNUM blocknum, FT h) { FTNODE n; struct ftnode_fetch_extra bfe; FTNODE_DISK_DATA ndd = NULL; fill_bfe_for_full_read(&bfe, h); - int r = toku_deserialize_ftnode_from (f, blocknum, 0 /*pass zero for hash, it doesn't matter*/, &n, &ndd, &bfe); - assert(r==0); + int r = toku_deserialize_ftnode_from (fd, blocknum, 0 /*pass zero for hash, it doesn't matter*/, &n, &ndd, &bfe); + assert_zero(r); assert(n!=0); printf("ftnode\n"); DISKOFF disksize, 
diskoffset; @@ -271,15 +266,16 @@ dump_node (int f, BLOCKNUM blocknum, FT h) { } printf(" children:\n"); for (int i=0; i<n->n_children; i++) { + printf(" child %d: ", i); if (n->height > 0) { - printf(" child %d: %" PRId64 "\n", i, BP_BLOCKNUM(n, i).b); + printf("%" PRId64 "\n", BP_BLOCKNUM(n, i).b); NONLEAF_CHILDINFO bnc = BNC(n, i); unsigned int n_bytes = toku_bnc_nbytesinbuf(bnc); int n_entries = toku_bnc_n_entries(bnc); if (n_bytes > 0 || n_entries > 0) { printf(" buffer contains %u bytes (%d items)\n", n_bytes, n_entries); } - if (dump_data) { + if (do_dump_data) { FIFO_ITERATE(bnc->buffer, key, keylen, data, datalen, typ, msn, xids, UU(is_fresh), { printf(" msn=%" PRIu64 " (0x%" PRIx64 ") ", msn.msn, msn.msn); @@ -316,7 +312,7 @@ dump_node (int f, BLOCKNUM blocknum, FT h) { } else { printf(" n_bytes_in_buffer= %" PRIu64 "", BLB_DATA(n, i)->get_disk_size()); printf(" items_in_buffer=%u\n", BLB_DATA(n, i)->num_klpairs()); - if (dump_data) { + if (do_dump_data) { BLB_DATA(n, i)->iterate<void, print_le>(NULL); } } @@ -325,13 +321,11 @@ dump_node (int f, BLOCKNUM blocknum, FT h) { toku_free(ndd); } -static void -dump_block_translation(FT h, uint64_t offset) { +static void dump_block_translation(FT h, uint64_t offset) { toku_blocknum_dump_translation(h->blocktable, make_blocknum(offset)); } -static void -dump_fragmentation(int UU(f), FT h, int tsv) { +static void dump_fragmentation(int UU(f), FT h, int tsv) { int64_t used_space; int64_t total_space; toku_blocktable_internal_fragmentation(h->blocktable, &total_space, &used_space); @@ -349,21 +343,20 @@ dump_fragmentation(int UU(f), FT h, int tsv) { } typedef struct { - int f; + int fd; FT h; uint64_t blocksizes; uint64_t leafsizes; uint64_t leafblocks; } frag_help_extra; -static int -nodesizes_helper(BLOCKNUM b, int64_t size, int64_t UU(address), void *extra) { +static int nodesizes_helper(BLOCKNUM b, int64_t size, int64_t UU(address), void *extra) { frag_help_extra *CAST_FROM_VOIDP(info, extra); FTNODE n; 
FTNODE_DISK_DATA ndd = NULL; struct ftnode_fetch_extra bfe; fill_bfe_for_full_read(&bfe, info->h); - int r = toku_deserialize_ftnode_from(info->f, b, 0 /*pass zero for hash, it doesn't matter*/, &n, &ndd, &bfe); + int r = toku_deserialize_ftnode_from(info->fd, b, 0 /*pass zero for hash, it doesn't matter*/, &n, &ndd, &bfe); if (r==0) { info->blocksizes += size; if (n->height == 0) { @@ -376,11 +369,10 @@ nodesizes_helper(BLOCKNUM b, int64_t size, int64_t UU(address), void *extra) { return 0; } -static void -dump_nodesizes(int f, FT h) { +static void dump_nodesizes(int fd, FT h) { frag_help_extra info; memset(&info, 0, sizeof(info)); - info.f = f; + info.fd = fd; info.h = h; toku_blocktable_iterate(h->blocktable, TRANSLATION_CHECKPOINTED, nodesizes_helper, &info, true, true); @@ -389,36 +381,45 @@ dump_nodesizes(int f, FT h) { printf("leafsizes\t%" PRIu64 "\n", info.leafsizes); } -static void -dump_garbage_stats(int f, FT ft) { - invariant(f == toku_cachefile_get_fd(ft->cf)); +static void dump_garbage_stats(int fd, FT ft) { + assert(fd == toku_cachefile_get_fd(ft->cf)); uint64_t total_space = 0; uint64_t used_space = 0; toku_ft_get_garbage(ft, &total_space, &used_space); - printf("total_size\t%" PRIu64 "\n", total_space); - printf("used_size\t%" PRIu64 "\n", used_space); + printf("garbage total size\t%" PRIu64 "\n", total_space); + printf("garbage used size\t%" PRIu64 "\n", used_space); } -static uint32_t -get_unaligned_uint32(unsigned char *p) { - return *(uint32_t *)p; +typedef struct __dump_node_extra { + int fd; + FT h; +} dump_node_extra; + +static int dump_node_wrapper(BLOCKNUM b, int64_t UU(size), int64_t UU(address), void *extra) { + dump_node_extra *CAST_FROM_VOIDP(info, extra); + dump_node(info->fd, b, info->h); + return 0; +} + +static uint32_t get_unaligned_uint32(unsigned char *p) { + uint32_t n; + memcpy(&n, p, sizeof n); + return n; } struct dump_sub_block { - uint32_t compressed_size; - uint32_t uncompressed_size; - uint32_t xsum; + uint32_t 
compressed_size; + uint32_t uncompressed_size; + uint32_t xsum; }; -static void -sub_block_deserialize(struct dump_sub_block *sb, unsigned char *sub_block_header) { +static void sub_block_deserialize(struct dump_sub_block *sb, unsigned char *sub_block_header) { sb->compressed_size = toku_dtoh32(get_unaligned_uint32(sub_block_header+0)); sb->uncompressed_size = toku_dtoh32(get_unaligned_uint32(sub_block_header+4)); sb->xsum = toku_dtoh32(get_unaligned_uint32(sub_block_header+8)); } -static void -verify_block(unsigned char *cp, uint64_t file_offset, uint64_t size) { +static void verify_block(unsigned char *cp, uint64_t file_offset, uint64_t size) { // verify the header checksum const size_t node_header = 8 + sizeof (uint32_t) + sizeof (uint32_t) + sizeof (uint32_t); @@ -461,24 +462,22 @@ verify_block(unsigned char *cp, uint64_t file_offset, uint64_t size) { printf("offset %u expected %" PRIu64 "\n", offset, size); } -static void -dump_block(int f, BLOCKNUM blocknum, FT h) { +static void dump_block(int fd, BLOCKNUM blocknum, FT h) { DISKOFF offset, size; toku_translate_blocknum_to_offset_size(h->blocktable, blocknum, &offset, &size); printf("%" PRId64 " at %" PRId64 " size %" PRId64 "\n", blocknum.b, offset, size); unsigned char *CAST_FROM_VOIDP(vp, toku_malloc(size)); - uint64_t r = pread(f, vp, size, offset); + uint64_t r = pread(fd, vp, size, offset); if (r == (uint64_t)size) { verify_block(vp, offset, size); } toku_free(vp); } -static void -dump_file(int f, uint64_t offset, uint64_t size, FILE *outfp) { +static void dump_file(int fd, uint64_t offset, uint64_t size, FILE *outfp) { unsigned char *XMALLOC_N(size, vp); - uint64_t r = pread(f, vp, size, offset); + uint64_t r = pread(fd, vp, size, offset); if (r == size) { if (outfp == stdout) { hex_dump(vp, offset, size); @@ -490,13 +489,11 @@ dump_file(int f, uint64_t offset, uint64_t size, FILE *outfp) { toku_free(vp); } -static void -set_file(int f, uint64_t offset, unsigned char newc) { - toku_os_pwrite(f, &newc, 
sizeof newc, offset); +static void set_file(int fd, uint64_t offset, unsigned char newc) { + toku_os_pwrite(fd, &newc, sizeof newc, offset); } -static int -readline (char *line, int maxline) { +static int readline(char *line, int maxline) { int i = 0; int c; while ((c = getchar()) != EOF && c != '\n' && i < maxline) { @@ -506,8 +503,7 @@ readline (char *line, int maxline) { return c == EOF ? EOF : i; } -static int -split_fields (char *line, char *fields[], int maxfields) { +static int split_fields(char *line, char *fields[], int maxfields) { int i; for (i=0; i<maxfields; i++) fields[i] = NULL; @@ -520,26 +516,16 @@ split_fields (char *line, char *fields[], int maxfields) { return i; } -static int -usage(const char *arg0) { - printf("Usage: %s [--nodata] [--i[nteractive]|--fragmentation [--tsv]|--translation-table|--rootnode] ftfilename\n", arg0); - return 1; -} - -typedef struct __dump_node_extra { - int f; - FT h; -} dump_node_extra; - -static int -dump_node_wrapper(BLOCKNUM b, int64_t UU(size), int64_t UU(address), void *extra) { - dump_node_extra *CAST_FROM_VOIDP(info, extra); - dump_node(info->f, b, info->h); - return 0; +static uint64_t getuint64(const char *f) { + if (strncmp(f, "0x", 2) == 0 || strncmp(f, "0X", 2) == 0) + return strtoull(f, 0, 16); + else if (strncmp(f, "0", 1) == 0) + return strtoull(f, 0, 8); + else + return strtoull(f, 0, 10); } -static void -interactive_help(void) { +static void interactive_help(void) { fprintf(stderr, "help\n"); fprintf(stderr, "header\n"); fprintf(stderr, "node NUMBER\n"); @@ -552,133 +538,160 @@ interactive_help(void) { fprintf(stderr, "quit\n"); } -static uint64_t -getuint64(const char *f) { - if (strncmp(f, "0x", 2) == 0 || strncmp(f, "0X", 2) == 0) - return strtoull(f, 0, 16); - else if (strncmp(f, "0", 1) == 0) - return strtoull(f, 0, 8); - else - return strtoull(f, 0, 10); +static void run_iteractive_loop(int fd, FT ft, CACHEFILE cf) { + while (1) { + printf("ftdump>"); fflush(stdout); + enum { maxline = 64}; + 
char line[maxline+1]; + int r = readline(line, maxline); + if (r == EOF) + break; + const int maxfields = 4; + char *fields[maxfields]; + int nfields = split_fields(line, fields, maxfields); + if (nfields == 0) + continue; + if (strcmp(fields[0], "help") == 0) { + interactive_help(); + } else if (strcmp(fields[0], "header") == 0) { + toku_ft_free(ft); + open_header(fd, &ft, cf); + dump_header(ft); + } else if (strcmp(fields[0], "block") == 0 && nfields == 2) { + BLOCKNUM blocknum = make_blocknum(getuint64(fields[1])); + dump_block(fd, blocknum, ft); + } else if (strcmp(fields[0], "node") == 0 && nfields == 2) { + BLOCKNUM off = make_blocknum(getuint64(fields[1])); + dump_node(fd, off, ft); + } else if (strcmp(fields[0], "dumpdata") == 0 && nfields == 2) { + do_dump_data = strtol(fields[1], NULL, 10); + } else if (strcmp(fields[0], "block_translation") == 0 || strcmp(fields[0], "bx") == 0) { + uint64_t offset = 0; + if (nfields == 2) + offset = getuint64(fields[1]); + dump_block_translation(ft, offset); + } else if (strcmp(fields[0], "fragmentation") == 0) { + dump_fragmentation(fd, ft, do_tsv); + } else if (strcmp(fields[0], "nodesizes") == 0) { + dump_nodesizes(fd, ft); + } else if (strcmp(fields[0], "garbage") == 0) { + dump_garbage_stats(fd, ft); + } else if (strcmp(fields[0], "file") == 0 && nfields >= 3) { + uint64_t offset = getuint64(fields[1]); + uint64_t size = getuint64(fields[2]); + FILE *outfp = stdout; + if (nfields >= 4) + outfp = fopen(fields[3], "w"); + dump_file(fd, offset, size, outfp); + } else if (strcmp(fields[0], "setfile") == 0 && nfields == 3) { + uint64_t offset = getuint64(fields[1]); + unsigned char newc = getuint64(fields[2]); + set_file(fd, offset, newc); + } else if (strcmp(fields[0], "quit") == 0 || strcmp(fields[0], "q") == 0) { + break; + } + } } -int -main (int argc, const char *const argv[]) { - int interactive = 0; - int fragmentation = 0; - int translation_table = 0; - int rootnode = 0; - int tsv = 0; +static int usage(void) { + 
fprintf(stderr, "Usage: %s ", arg0); + fprintf(stderr, "--interactive "); + fprintf(stderr, "--nodata "); + fprintf(stderr, "--dumpdata 0|1 "); + fprintf(stderr, "--header "); + fprintf(stderr, "--rootnode "); + fprintf(stderr, "--fragmentation "); + fprintf(stderr, "--garbage "); + fprintf(stderr, "--tsv "); + fprintf(stderr, "--translation-table "); + fprintf(stderr, "--tsv "); + fprintf(stderr, "ftfilename \n"); + return 1; +} - const char *arg0 = argv[0]; +int main (int argc, const char *const argv[]) { + arg0 = argv[0]; argc--; argv++; while (argc>0) { - if (strcmp(argv[0], "--nodata") == 0) { - dump_data = 0; - } else if (strcmp(argv[0], "--interactive") == 0 || strcmp(argv[0], "--i") == 0) { - interactive = 1; + if (strcmp(argv[0], "--interactive") == 0 || strcmp(argv[0], "--i") == 0) { + do_interactive = 1; + } else if (strcmp(argv[0], "--nodata") == 0) { + do_dump_data = 0; + } else if (strcmp(argv[0], "--dumpdata") == 0 && argc > 1) { + argc--; argv++; + do_dump_data = atoi(argv[0]); + } else if (strcmp(argv[0], "--header") == 0) { + do_header = 1; + } else if (strcmp(argv[0], "--rootnode") == 0) { + do_rootnode = 1; } else if (strcmp(argv[0], "--fragmentation") == 0) { - fragmentation = 1; + do_fragmentation = 1; + } else if (strcmp(argv[0], "--garbage") == 0) { + do_garbage = 1; } else if (strcmp(argv[0], "--tsv") == 0) { - tsv = 1; + do_tsv = 1; } else if (strcmp(argv[0], "--translation-table") == 0) { - translation_table = 1; - } else if (strcmp(argv[0], "--rootnode") == 0) { - rootnode = 1; - } else if (strcmp(argv[0], "--help") == 0) { - return usage(arg0); + do_translation_table = 1; + } else if (strcmp(argv[0], "--help") == 0 || strcmp(argv[0], "-?") == 0 || strcmp(argv[0], "-h") == 0) { + return usage(); } else { break; } argc--; argv++; } - if (argc != 1) return usage(arg0); + if (argc != 1) + return usage(); int r = toku_ft_layer_init(); - invariant_zero(r); + assert_zero(r); + + fname = argv[0]; + int fd = open(fname, O_RDWR + O_BINARY); + if 
(fd < 0) { + fprintf(stderr, "%s: can not open %s errno %d\n", arg0, fname, errno); + return 1; + } - const char *n = argv[0]; - int f = open(n, O_RDWR + O_BINARY); assert(f>=0); - FT ft; // create a cachefile for the header + CACHETABLE ct = NULL; toku_cachetable_create(&ct, 1<<25, (LSN){0}, 0); + CACHEFILE cf = NULL; - r = toku_cachetable_openfd (&cf, ct, f, n); - assert(r==0); - open_header(f, &ft, cf); - if (!fragmentation && !translation_table) { - // quick fix for now, we want those two to have clean output - dump_header(ft); - } - if (interactive) { - while (1) { - printf("ftdump>"); fflush(stdout); - enum { maxline = 64}; - char line[maxline+1]; - r = readline(line, maxline); - if (r == EOF) - break; - const int maxfields = 4; - char *fields[maxfields]; - int nfields = split_fields(line, fields, maxfields); - if (nfields == 0) - continue; - if (strcmp(fields[0], "help") == 0) { - interactive_help(); - } else if (strcmp(fields[0], "header") == 0) { - toku_ft_free(ft); - open_header(f, &ft, cf); - dump_header(ft); - } else if (strcmp(fields[0], "block") == 0 && nfields == 2) { - BLOCKNUM blocknum = make_blocknum(getuint64(fields[1])); - dump_block(f, blocknum, ft); - } else if (strcmp(fields[0], "node") == 0 && nfields == 2) { - BLOCKNUM off = make_blocknum(getuint64(fields[1])); - dump_node(f, off, ft); - } else if (strcmp(fields[0], "dumpdata") == 0 && nfields == 2) { - dump_data = strtol(fields[1], NULL, 10); - } else if (strcmp(fields[0], "block_translation") == 0 || strcmp(fields[0], "bx") == 0) { - uint64_t offset = 0; - if (nfields == 2) - offset = getuint64(fields[1]); - dump_block_translation(ft, offset); - } else if (strcmp(fields[0], "fragmentation") == 0) { - dump_fragmentation(f, ft, tsv); - } else if (strcmp(fields[0], "nodesizes") == 0) { - dump_nodesizes(f, ft); - } else if (strcmp(fields[0], "garbage") == 0) { - dump_garbage_stats(f, ft); - } else if (strcmp(fields[0], "file") == 0 && nfields >= 3) { - uint64_t offset = getuint64(fields[1]); 
- uint64_t size = getuint64(fields[2]); - FILE *outfp = stdout; - if (nfields >= 4) - outfp = fopen(fields[3], "w"); - dump_file(f, offset, size, outfp); - } else if (strcmp(fields[0], "setfile") == 0 && nfields == 3) { - uint64_t offset = getuint64(fields[1]); - unsigned char newc = getuint64(fields[2]); - set_file(f, offset, newc); - } else if (strcmp(fields[0], "quit") == 0 || strcmp(fields[0], "q") == 0) { - break; - } - } - } else if (rootnode) { - dump_node(f, ft->h->root_blocknum, ft); - } else if (fragmentation) { - dump_fragmentation(f, ft, tsv); - } else if (translation_table) { - toku_dump_translation_table_pretty(stdout, ft->blocktable); - } else { - printf("Block translation:"); + r = toku_cachetable_openfd (&cf, ct, fd, fname); + assert_zero(r); - toku_dump_translation_table(stdout, ft->blocktable); + FT ft = NULL; + open_header(fd, &ft, cf); - struct __dump_node_extra info; - info.f = f; - info.h = ft; - toku_blocktable_iterate(ft->blocktable, TRANSLATION_CHECKPOINTED, - dump_node_wrapper, &info, true, true); + if (do_interactive) { + run_iteractive_loop(fd, ft, cf); + } else { + if (do_header) { + dump_header(ft); + } + if (do_rootnode) { + dump_node(fd, ft->h->root_blocknum, ft); + } + if (do_fragmentation) { + dump_fragmentation(fd, ft, do_tsv); + } + if (do_translation_table) { + toku_dump_translation_table_pretty(stdout, ft->blocktable); + } + if (do_garbage) { + dump_garbage_stats(fd, ft); + } + if (!do_header && !do_rootnode && !do_fragmentation && !do_translation_table && !do_garbage) { + printf("Block translation:"); + + toku_dump_translation_table(stdout, ft->blocktable); + + struct __dump_node_extra info; + info.fd = fd; + info.h = ft; + toku_blocktable_iterate(ft->blocktable, TRANSLATION_CHECKPOINTED, + dump_node_wrapper, &info, true, true); + } } toku_cachefile_close(&cf, false, ZERO_LSN); toku_cachetable_close(&ct); diff --git a/storage/tokudb/ft-index/scripts/run.stress-tests.py b/storage/tokudb/ft-index/scripts/run.stress-tests.py 
index fbbf5ee6472..d4245a7c4b4 100755 --- a/storage/tokudb/ft-index/scripts/run.stress-tests.py +++ b/storage/tokudb/ft-index/scripts/run.stress-tests.py @@ -735,6 +735,7 @@ if __name__ == '__main__': 'test_stress6.tdb', 'test_stress7.tdb', 'test_stress_hot_indexing.tdb', + 'test_stress_with_verify.tdb', 'test_stress_openclose.tdb'] default_recover_testnames = ['recover-test_stress1.tdb', 'recover-test_stress2.tdb', @@ -766,8 +767,8 @@ if __name__ == '__main__': help="skip the tests that don't involve upgrade [default=False]") upgrade_group.add_option('--double_upgrade', action='store_true', dest='double_upgrade', default=False, help='run the upgrade tests twice in a row [default=False]') - upgrade_group.add_option('--add_old_version', action='append', type='choice', dest='old_versions', choices=['4.2.0', '5.0.8', '5.2.7', '6.0.0', '6.1.0', '6.5.1', '6.6.3'], - help='which old versions to use for running the stress tests in upgrade mode. can be specified multiple times [options=4.2.0, 5.0.8, 5.2.7, 6.0.0, 6.1.0, 6.5.1, 6.6.3]') + upgrade_group.add_option('--add_old_version', action='append', type='choice', dest='old_versions', choices=['4.2.0', '5.0.8', '5.2.7', '6.0.0', '6.1.0', '6.5.1', '6.6.3', '7.1.6'], + help='which old versions to use for running the stress tests in upgrade mode. 
can be specified multiple times [options=4.2.0, 5.0.8, 5.2.7, 6.0.0, 6.1.0, 6.5.1, 6.6.3, 7.1.6]') upgrade_group.add_option('--old_environments_dir', type='string', dest='old_environments_dir', default=('%s/old-stress-test-envs' % default_tokudb_data), help='directory containing old version environments (should contain 5.0.8/, 5.2.7/, etc, and the environments should be in those) [default=../../tokudb.data/stress_environments]') diff --git a/storage/tokudb/ft-index/src/loader.cc b/storage/tokudb/ft-index/src/loader.cc index 88db258e1ff..62b4f0b6cef 100644 --- a/storage/tokudb/ft-index/src/loader.cc +++ b/storage/tokudb/ft-index/src/loader.cc @@ -172,6 +172,13 @@ struct __toku_loader_internal { char **inames_in_env; /* [N] inames of new files to be created */ }; +static void free_inames(char **inames, int n) { + for (int i = 0; i < n; i++) { + toku_free(inames[i]); + } + toku_free(inames); +} + /* * free_loader_resources() frees all of the resources associated with * struct __toku_loader_internal @@ -185,16 +192,15 @@ static void free_loader_resources(DB_LOADER *loader) toku_destroy_dbt(&loader->i->err_val); if (loader->i->inames_in_env) { - for (int i=0; i<loader->i->N; i++) { - if (loader->i->inames_in_env[i]) toku_free(loader->i->inames_in_env[i]); - } - toku_free(loader->i->inames_in_env); + free_inames(loader->i->inames_in_env, loader->i->N); + loader->i->inames_in_env = nullptr; } - if (loader->i->temp_file_template) toku_free(loader->i->temp_file_template); + toku_free(loader->i->temp_file_template); + loader->i->temp_file_template = nullptr; // loader->i toku_free(loader->i); - loader->i = NULL; + loader->i = nullptr; } } @@ -245,6 +251,7 @@ toku_loader_create_loader(DB_ENV *env, bool check_empty) { int rval; HANDLE_READ_ONLY_TXN(txn); + DB_TXN *loader_txn = nullptr; *blp = NULL; // set later when created @@ -299,6 +306,13 @@ toku_loader_create_loader(DB_ENV *env, } { + if (env->i->open_flags & DB_INIT_TXN) { + rval = env->txn_begin(env, txn, &loader_txn, 
0); + if (rval) { + goto create_exit; + } + } + ft_compare_func compare_functions[N]; for (int i=0; i<N; i++) { compare_functions[i] = env->i->bt_compare; @@ -306,18 +320,21 @@ toku_loader_create_loader(DB_ENV *env, // time to open the big kahuna char **XMALLOC_N(N, new_inames_in_env); + for (int i = 0; i < N; i++) { + new_inames_in_env[i] = nullptr; + } FT_HANDLE *XMALLOC_N(N, fts); for (int i=0; i<N; i++) { fts[i] = dbs[i]->i->ft_handle; } LSN load_lsn; - rval = locked_load_inames(env, txn, N, dbs, new_inames_in_env, &load_lsn, puts_allowed); + rval = locked_load_inames(env, loader_txn, N, dbs, new_inames_in_env, &load_lsn, puts_allowed); if ( rval!=0 ) { - toku_free(new_inames_in_env); + free_inames(new_inames_in_env, N); toku_free(fts); goto create_exit; } - TOKUTXN ttxn = txn ? db_txn_struct_i(txn)->tokutxn : NULL; + TOKUTXN ttxn = loader_txn ? db_txn_struct_i(loader_txn)->tokutxn : NULL; rval = toku_ft_loader_open(&loader->i->ft_loader, env->i->cachetable, env->i->generate_row_for_put, @@ -331,12 +348,14 @@ toku_loader_create_loader(DB_ENV *env, ttxn, puts_allowed, env->get_loader_memory_size(env), - compress_intermediates); + compress_intermediates, + puts_allowed); if ( rval!=0 ) { - toku_free(new_inames_in_env); + free_inames(new_inames_in_env, N); toku_free(fts); goto create_exit; } + loader->i->inames_in_env = new_inames_in_env; toku_free(fts); @@ -348,10 +367,19 @@ toku_loader_create_loader(DB_ENV *env, rval = 0; } + rval = loader_txn->commit(loader_txn, 0); + assert_zero(rval); + loader_txn = nullptr; + rval = 0; } *blp = loader; create_exit: + if (loader_txn) { + int r = loader_txn->abort(loader_txn); + assert_zero(r); + loader_txn = nullptr; + } if (rval == 0) { (void) toku_sync_fetch_and_add(&STATUS_VALUE(LOADER_CREATE), 1); (void) toku_sync_fetch_and_add(&STATUS_VALUE(LOADER_CURRENT), 1); @@ -441,7 +469,7 @@ static void redirect_loader_to_empty_dictionaries(DB_LOADER *loader) { loader->i->dbs, loader->i->db_flags, loader->i->dbt_flags, - 0, + 
LOADER_DISALLOW_PUTS, false ); lazy_assert_zero(r); diff --git a/storage/tokudb/ft-index/src/tests/dbremove-nofile-limit.cc b/storage/tokudb/ft-index/src/tests/dbremove-nofile-limit.cc new file mode 100644 index 00000000000..eb5c6b80b63 --- /dev/null +++ b/storage/tokudb/ft-index/src/tests/dbremove-nofile-limit.cc @@ -0,0 +1,177 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuDB, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2013 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. 
+ +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. 
If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#ident "Copyright (c) 2014 Tokutek Inc. All rights reserved." +#ident "$Id$" + +// This test verifies that the env->dbremove function returns an error rather than +// crash when the NOFILE resource limit is exceeded. + +#include "test.h" +#include <db.h> +#include <sys/resource.h> + +static const char *envdir = TOKU_TEST_FILENAME; + +static void test_dbremove() { + int r; + + char rmcmd[32 + strlen(envdir)]; + snprintf(rmcmd, sizeof rmcmd, "rm -rf %s", envdir); + r = system(rmcmd); CKERR(r); + r = toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r); + + DB_ENV *env; + r = db_env_create(&env, 0); CKERR(r); + int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE; + r = env->open(env, envdir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r); + env->set_errfile(env, stderr); + + DB *db; + r = db_create(&db, env, 0); CKERR(r); + char fname[32]; + sprintf(fname, "db%d", 0); + r = db->open(db, nullptr, fname, nullptr, DB_BTREE, DB_CREATE, 0666); CKERR(r); + + r = db->close(db, 0); CKERR(r); + + DB_TXN *txn; + r = env->txn_begin(env, nullptr, &txn, 0); CKERR(r); + + struct rlimit current_limit; + r = getrlimit(RLIMIT_NOFILE, ¤t_limit); + assert(r == 0); + + struct rlimit new_limit = current_limit; + new_limit.rlim_cur = 0; + r = setrlimit(RLIMIT_NOFILE, &new_limit); + assert(r == 0); + + r = env->dbremove(env, txn, 
fname, nullptr, 0); + CKERR2(r, EMFILE); + + r = setrlimit(RLIMIT_NOFILE, ¤t_limit); + assert(r == 0); + + r = env->dbremove(env, txn, fname, nullptr, 0); + CKERR(r); + + r = txn->commit(txn, 0); CKERR(r); + + r = env->close(env, 0); CKERR(r); +} + +static void do_args(int argc, char * const argv[]) { + int resultcode; + char *cmd = argv[0]; + argc--; argv++; + while (argc>0) { + if (strcmp(argv[0], "-h")==0) { + resultcode=0; + do_usage: + fprintf(stderr, "Usage: %s -h -v -q\n", cmd); + exit(resultcode); + } else if (strcmp(argv[0], "-v")==0) { + verbose++; + } else if (strcmp(argv[0],"-q")==0) { + verbose--; + if (verbose<0) verbose=0; + } else { + fprintf(stderr, "Unknown arg: %s\n", argv[0]); + resultcode=1; + goto do_usage; + } + argc--; + argv++; + } +} + +int test_main(int argc, char * const *argv) { + do_args(argc, argv); + test_dbremove(); + return 0; +} diff --git a/storage/tokudb/ft-index/src/tests/loader-close-nproc-limit.cc b/storage/tokudb/ft-index/src/tests/loader-close-nproc-limit.cc new file mode 100644 index 00000000000..3ef2b0541f7 --- /dev/null +++ b/storage/tokudb/ft-index/src/tests/loader-close-nproc-limit.cc @@ -0,0 +1,198 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). 
+ + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuDB, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2013 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. 
+ + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +// Verify that loader->close works correctly (does not crash, does not leak memory, returns the right error code) +// when the NPROC limit is exceeded. + +#ident "Copyright (c) 2010-2013 Tokutek Inc. All rights reserved." 
+#ident "$Id$" + +#include "test.h" +#include <db.h> +#include <sys/resource.h> + +static int loader_flags = 0; +static const char *envdir = TOKU_TEST_FILENAME; + +static void run_test(int ndb) { + int r; + + char rmcmd[32 + strlen(envdir)]; + snprintf(rmcmd, sizeof rmcmd, "rm -rf %s", envdir); + r = system(rmcmd); CKERR(r); + r = toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r); + + DB_ENV *env; + r = db_env_create(&env, 0); CKERR(r); + int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE; + r = env->open(env, envdir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r); + env->set_errfile(env, stderr); + + DB *dbs[ndb]; + uint32_t db_flags[ndb]; + uint32_t dbt_flags[ndb]; + for (int i = 0; i < ndb; i++) { + db_flags[i] = DB_NOOVERWRITE; + dbt_flags[i] = 0; + r = db_create(&dbs[i], env, 0); CKERR(r); + char name[32]; + sprintf(name, "db%d", i); + r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r); + } + + DB_TXN *txn; + r = env->txn_begin(env, NULL, &txn, 0); CKERR(r); + + DB_LOADER *loader; + r = env->create_loader(env, txn, &loader, ndb > 0 ? 
dbs[0] : NULL, ndb, dbs, db_flags, dbt_flags, loader_flags); CKERR(r); + + struct rlimit current_nproc_limit; + r = getrlimit(RLIMIT_NPROC, ¤t_nproc_limit); + assert(r == 0); + + struct rlimit new_nproc_limit = current_nproc_limit; + new_nproc_limit.rlim_cur = 0; + r = setrlimit(RLIMIT_NPROC, &new_nproc_limit); + assert(r == 0); + + r = loader->close(loader); + + if (loader_flags & LOADER_DISALLOW_PUTS) + CKERR(r); + else + CKERR2(r, EAGAIN); + + r = setrlimit(RLIMIT_NPROC, ¤t_nproc_limit); + assert(r == 0); + + r = txn->abort(txn); CKERR(r); + + for (int i = 0; i < ndb; i++) { + r = dbs[i]->close(dbs[i], 0); CKERR(r); + } + + r = env->close(env, 0); CKERR(r); +} + +static void do_args(int argc, char * const argv[]) { + int resultcode; + char *cmd = argv[0]; + argc--; argv++; + while (argc>0) { + if (strcmp(argv[0], "-h")==0) { + resultcode=0; + do_usage: + fprintf(stderr, "Usage: %s -h -v -q -p\n", cmd); + exit(resultcode); + } else if (strcmp(argv[0], "-v")==0) { + verbose++; + } else if (strcmp(argv[0],"-q")==0) { + verbose--; + if (verbose<0) verbose=0; + } else if (strcmp(argv[0], "-p") == 0) { + loader_flags |= LOADER_DISALLOW_PUTS; + } else if (strcmp(argv[0], "-z") == 0) { + loader_flags |= LOADER_COMPRESS_INTERMEDIATES; + } else if (strcmp(argv[0], "-e") == 0) { + argc--; argv++; + if (argc > 0) + envdir = argv[0]; + } else { + fprintf(stderr, "Unknown arg: %s\n", argv[0]); + resultcode=1; + goto do_usage; + } + argc--; + argv++; + } +} + +int test_main(int argc, char * const *argv) { + do_args(argc, argv); + run_test(1); + return 0; +} diff --git a/storage/tokudb/ft-index/src/tests/loader-create-close.cc b/storage/tokudb/ft-index/src/tests/loader-create-close.cc index 6a04387152f..4d66a9df004 100644 --- a/storage/tokudb/ft-index/src/tests/loader-create-close.cc +++ b/storage/tokudb/ft-index/src/tests/loader-create-close.cc @@ -97,11 +97,7 @@ PATENT RIGHTS GRANT: static int loader_flags = 0; static const char *envdir = TOKU_TEST_FILENAME; -static int 
put_multiple_generate(DB *UU(dest_db), DB *UU(src_db), DBT_ARRAY *UU(dest_keys), DBT_ARRAY *UU(dest_vals), const DBT *UU(src_key), const DBT *UU(src_val)) { - return ENOMEM; -} - -static void loader_open_abort(int ndb) { +static void test_loader_create_close(int ndb) { int r; char rmcmd[32 + strlen(envdir)]; @@ -111,8 +107,6 @@ static void loader_open_abort(int ndb) { DB_ENV *env; r = db_env_create(&env, 0); CKERR(r); - r = env->set_generate_row_callback_for_put(env, put_multiple_generate); - CKERR(r); int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE; r = env->open(env, envdir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r); env->set_errfile(env, stderr); @@ -181,8 +175,8 @@ static void do_args(int argc, char * const argv[]) { int test_main(int argc, char * const *argv) { do_args(argc, argv); - loader_open_abort(0); - loader_open_abort(1); - loader_open_abort(2); + test_loader_create_close(0); + test_loader_create_close(1); + test_loader_create_close(2); return 0; } diff --git a/storage/tokudb/ft-index/src/tests/loader-create-commit-nproc-limit.cc b/storage/tokudb/ft-index/src/tests/loader-create-commit-nproc-limit.cc new file mode 100644 index 00000000000..091809a8551 --- /dev/null +++ b/storage/tokudb/ft-index/src/tests/loader-create-commit-nproc-limit.cc @@ -0,0 +1,211 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). 
+ + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuDB, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2013 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. 
+ + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#ident "Copyright (c) 2010-2013 Tokutek Inc. All rights reserved." +#ident "$Id$" + +// This test crashes if a failed loader creation causes the db to be corrupted by unlinking +// the underlying fractal tree files. This unlinking occurs because the txn that logs the +// load log entries is committed rather than aborted. 
+ +#include "test.h" +#include <db.h> +#include <sys/resource.h> + +static int loader_flags = 0; +static const char *envdir = TOKU_TEST_FILENAME; + +static void run_test(int ndb) { + int r; + + char rmcmd[32 + strlen(envdir)]; + snprintf(rmcmd, sizeof rmcmd, "rm -rf %s", envdir); + r = system(rmcmd); CKERR(r); + r = toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r); + + DB_ENV *env; + r = db_env_create(&env, 0); CKERR(r); + int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE; + r = env->open(env, envdir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r); + env->set_errfile(env, stderr); + + DB *dbs[ndb]; + uint32_t db_flags[ndb]; + uint32_t dbt_flags[ndb]; + for (int i = 0; i < ndb; i++) { + db_flags[i] = DB_NOOVERWRITE; + dbt_flags[i] = 0; + r = db_create(&dbs[i], env, 0); CKERR(r); + char name[32]; + sprintf(name, "db%d", i); + r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r); + } + + DB_TXN *txn; + r = env->txn_begin(env, NULL, &txn, 0); CKERR(r); + + struct rlimit current_nproc_limit; + r = getrlimit(RLIMIT_NPROC, ¤t_nproc_limit); + assert(r == 0); + + struct rlimit new_nproc_limit = current_nproc_limit; + new_nproc_limit.rlim_cur = 0; + r = setrlimit(RLIMIT_NPROC, &new_nproc_limit); + assert(r == 0); + + DB_LOADER *loader; + int loader_r = env->create_loader(env, txn, &loader, ndb > 0 ? 
dbs[0] : NULL, ndb, dbs, db_flags, dbt_flags, loader_flags); + + r = setrlimit(RLIMIT_NPROC, ¤t_nproc_limit); + assert(r == 0); + + if (loader_flags & LOADER_DISALLOW_PUTS) { + CKERR(loader_r); + loader_r = loader->close(loader); + CKERR(loader_r); + } else { + CKERR2(loader_r, EAGAIN); + } + + r = txn->commit(txn, 0); CKERR(r); + + for (int i = 0; i < ndb; i++) { + r = dbs[i]->close(dbs[i], 0); CKERR(r); + } + + for (int i = 0; i < ndb; i++) { + r = db_create(&dbs[i], env, 0); CKERR(r); + char name[32]; + sprintf(name, "db%d", i); + r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, 0, 0666); CKERR(r); + } + + for (int i = 0; i < ndb; i++) { + r = dbs[i]->close(dbs[i], 0); CKERR(r); + } + + r = env->close(env, 0); CKERR(r); +} + +static void do_args(int argc, char * const argv[]) { + int resultcode; + char *cmd = argv[0]; + argc--; argv++; + while (argc>0) { + if (strcmp(argv[0], "-h")==0) { + resultcode=0; + do_usage: + fprintf(stderr, "Usage: %s -h -v -q -p\n", cmd); + exit(resultcode); + } else if (strcmp(argv[0], "-v")==0) { + verbose++; + } else if (strcmp(argv[0],"-q")==0) { + verbose--; + if (verbose<0) verbose=0; + } else if (strcmp(argv[0], "-p") == 0) { + loader_flags |= LOADER_DISALLOW_PUTS; + } else if (strcmp(argv[0], "-z") == 0) { + loader_flags |= LOADER_COMPRESS_INTERMEDIATES; + } else if (strcmp(argv[0], "-e") == 0) { + argc--; argv++; + if (argc > 0) + envdir = argv[0]; + } else { + fprintf(stderr, "Unknown arg: %s\n", argv[0]); + resultcode=1; + goto do_usage; + } + argc--; + argv++; + } +} + +int test_main(int argc, char * const *argv) { + do_args(argc, argv); + run_test(1); + return 0; +} diff --git a/storage/tokudb/ft-index/src/tests/loader-create-nproc-limit.cc b/storage/tokudb/ft-index/src/tests/loader-create-nproc-limit.cc new file mode 100644 index 00000000000..7a61fce7799 --- /dev/null +++ b/storage/tokudb/ft-index/src/tests/loader-create-nproc-limit.cc @@ -0,0 +1,199 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- 
*/ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuDB, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2013 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. 
+ +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +// Verify that env->create_loader works correctly (does not crash, does not leak memory, returns the right error code) +// when the NPROC limit is exceeded. + +#ident "Copyright (c) 2010-2013 Tokutek Inc. All rights reserved." 
+#ident "$Id$" + +#include "test.h" +#include <db.h> +#include <sys/resource.h> + +static int loader_flags = 0; +static const char *envdir = TOKU_TEST_FILENAME; + +static void run_test(int ndb) { + int r; + + char rmcmd[32 + strlen(envdir)]; + snprintf(rmcmd, sizeof rmcmd, "rm -rf %s", envdir); + r = system(rmcmd); CKERR(r); + r = toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r); + + DB_ENV *env; + r = db_env_create(&env, 0); CKERR(r); + int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE; + r = env->open(env, envdir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r); + env->set_errfile(env, stderr); + + DB *dbs[ndb]; + uint32_t db_flags[ndb]; + uint32_t dbt_flags[ndb]; + for (int i = 0; i < ndb; i++) { + db_flags[i] = DB_NOOVERWRITE; + dbt_flags[i] = 0; + r = db_create(&dbs[i], env, 0); CKERR(r); + char name[32]; + sprintf(name, "db%d", i); + r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r); + } + + DB_TXN *txn; + r = env->txn_begin(env, NULL, &txn, 0); CKERR(r); + + struct rlimit current_nproc_limit; + r = getrlimit(RLIMIT_NPROC, ¤t_nproc_limit); + assert(r == 0); + + struct rlimit new_nproc_limit = current_nproc_limit; + new_nproc_limit.rlim_cur = 0; + r = setrlimit(RLIMIT_NPROC, &new_nproc_limit); + assert(r == 0); + + DB_LOADER *loader; + int loader_r = env->create_loader(env, txn, &loader, ndb > 0 ? 
dbs[0] : NULL, ndb, dbs, db_flags, dbt_flags, loader_flags); + + r = setrlimit(RLIMIT_NPROC, ¤t_nproc_limit); + assert(r == 0); + + if (loader_flags & LOADER_DISALLOW_PUTS) { + CKERR(loader_r); + loader_r = loader->close(loader); + CKERR(loader_r); + } else { + CKERR2(loader_r, EAGAIN); + } + + r = txn->abort(txn); CKERR(r); + + for (int i = 0; i < ndb; i++) { + r = dbs[i]->close(dbs[i], 0); CKERR(r); + } + + r = env->close(env, 0); CKERR(r); +} + +static void do_args(int argc, char * const argv[]) { + int resultcode; + char *cmd = argv[0]; + argc--; argv++; + while (argc>0) { + if (strcmp(argv[0], "-h")==0) { + resultcode=0; + do_usage: + fprintf(stderr, "Usage: %s -h -v -q -p\n", cmd); + exit(resultcode); + } else if (strcmp(argv[0], "-v")==0) { + verbose++; + } else if (strcmp(argv[0],"-q")==0) { + verbose--; + if (verbose<0) verbose=0; + } else if (strcmp(argv[0], "-p") == 0) { + loader_flags |= LOADER_DISALLOW_PUTS; + } else if (strcmp(argv[0], "-z") == 0) { + loader_flags |= LOADER_COMPRESS_INTERMEDIATES; + } else if (strcmp(argv[0], "-e") == 0) { + argc--; argv++; + if (argc > 0) + envdir = argv[0]; + } else { + fprintf(stderr, "Unknown arg: %s\n", argv[0]); + resultcode=1; + goto do_usage; + } + argc--; + argv++; + } +} + +int test_main(int argc, char * const *argv) { + do_args(argc, argv); + run_test(1); + return 0; +} diff --git a/storage/tokudb/ft-index/src/tests/test_insert_unique.cc b/storage/tokudb/ft-index/src/tests/test_insert_unique.cc new file mode 100644 index 00000000000..29439f9d704 --- /dev/null +++ b/storage/tokudb/ft-index/src/tests/test_insert_unique.cc @@ -0,0 +1,202 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +#ident "$Id$" +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the 
+ following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuDB, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2013 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. 
+ + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." +#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
+/** + * Test that unique inserts work correctly. This exercises the rightmost leaf inject optimization. + */ + +#include <portability/toku_random.h> + +#include "test.h" + +static char random_buf[8]; +static struct random_data random_data; + +static void test_simple_unique_insert(DB_ENV *env) { + int r; + DB *db; + r = db_create(&db, env, 0); CKERR(r); + r = db->open(db, NULL, "db", NULL, DB_BTREE, DB_CREATE, 0644); CKERR(r); + + DBT key1, key2, key3; + dbt_init(&key1, "a", sizeof("a")); + dbt_init(&key2, "b", sizeof("b")); + dbt_init(&key3, "c", sizeof("c")); + r = db->put(db, NULL, &key1, &key1, DB_NOOVERWRITE); CKERR(r); + r = db->put(db, NULL, &key1, &key1, DB_NOOVERWRITE); CKERR2(r, DB_KEYEXIST); + r = db->put(db, NULL, &key3, &key3, DB_NOOVERWRITE); CKERR(r); + r = db->put(db, NULL, &key3, &key3, DB_NOOVERWRITE); CKERR2(r, DB_KEYEXIST); + r = db->put(db, NULL, &key2, &key2, DB_NOOVERWRITE); CKERR(r); + r = db->put(db, NULL, &key2, &key2, DB_NOOVERWRITE); CKERR2(r, DB_KEYEXIST); + // sanity check + r = db->put(db, NULL, &key1, &key1, DB_NOOVERWRITE); CKERR2(r, DB_KEYEXIST); + r = db->put(db, NULL, &key1, &key3, DB_NOOVERWRITE); CKERR2(r, DB_KEYEXIST); + + r = db->close(db, 0); CKERR(r); + r = env->dbremove(env, NULL, "db", NULL, 0); CKERR(r); +} + +static void test_large_sequential_insert_unique(DB_ENV *env) { + int r; + DB *db; + r = db_create(&db, env, 0); CKERR(r); + + // very small nodes/basements to make a taller tree + r = db->set_pagesize(db, 8 * 1024); CKERR(r); + r = db->set_readpagesize(db, 2 * 1024); CKERR(r); + r = db->open(db, NULL, "db", NULL, DB_BTREE, DB_CREATE, 0644); CKERR(r); + + const int val_size = 1024; + char *XMALLOC_N(val_size, val_buf); + memset(val_buf, 'k', val_size); + DBT val; + dbt_init(&val, val_buf, val_size); + + // grow a tree to about depth 3, taking sanity checks along the way + const int start_num_rows = (64 * 1024 * 1024) / val_size; + for (int i = 0; i < start_num_rows; i++) { + DBT key; + int k = toku_htonl(i); + 
dbt_init(&key, &k, sizeof(k)); + r = db->put(db, NULL, &key, &val, DB_NOOVERWRITE); CKERR(r); + if (i % 50 == 0) { + // sanity check - should not be able to insert this key twice in a row + r = db->put(db, NULL, &key, &val, DB_NOOVERWRITE); CKERR2(r, DB_KEYEXIST); + + // .. but re-inserting is okay, if we provisionally deleted the row + DB_TXN *txn; + r = env->txn_begin(env, NULL, &txn, 0); CKERR(r); + r = db->del(db, NULL, &key, DB_DELETE_ANY); CKERR(r); + r = db->put(db, NULL, &key, &val, DB_NOOVERWRITE); CKERR(r); + r = txn->commit(txn, 0); CKERR(r); + } + if (i > 0 && i % 250 == 0) { + // sanity check - unique checks on random keys we already inserted should + // fail (exercises middle-of-the-tree checks) + for (int check_i = 0; check_i < 4; check_i++) { + DBT rand_key; + int rand_k = toku_htonl(myrandom_r(&random_data) % i); + dbt_init(&rand_key, &rand_k, sizeof(rand_k)); + r = db->put(db, NULL, &rand_key, &val, DB_NOOVERWRITE); CKERR2(r, DB_KEYEXIST); + } + } + } + + toku_free(val_buf); + r = db->close(db, 0); CKERR(r); + r = env->dbremove(env, NULL, "db", NULL, 0); CKERR(r); +} + + +int test_main(int argc, char * const argv[]) { + default_parse_args(argc, argv); + + int r; + const int envflags = DB_INIT_MPOOL | DB_CREATE | DB_THREAD | + DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN | DB_PRIVATE; + + // startup + DB_ENV *env; + toku_os_recursive_delete(TOKU_TEST_FILENAME); + r = toku_os_mkdir(TOKU_TEST_FILENAME, 0755); CKERR(r); + r = db_env_create(&env, 0); CKERR(r); + r = env->open(env, TOKU_TEST_FILENAME, envflags, 0755); + + r = myinitstate_r(random(), random_buf, 8, &random_data); CKERR(r); + + test_simple_unique_insert(env); + test_large_sequential_insert_unique(env); + + // cleanup + r = env->close(env, 0); CKERR(r); + + return 0; +} + diff --git a/storage/tokudb/ft-index/src/ydb.cc b/storage/tokudb/ft-index/src/ydb.cc index a2bb221a40b..df4fd6baf87 100644 --- a/storage/tokudb/ft-index/src/ydb.cc +++ b/storage/tokudb/ft-index/src/ydb.cc @@ -1160,6 +1160,7 
@@ env_close(DB_ENV * env, uint32_t flags) { goto panic_and_quit_early; } } + env_fsync_log_cron_destroy(env); if (env->i->cachetable) { toku_cachetable_minicron_shutdown(env->i->cachetable); if (env->i->logger) { @@ -1200,7 +1201,6 @@ env_close(DB_ENV * env, uint32_t flags) { } env_fs_destroy(env); - env_fsync_log_cron_destroy(env); env->i->ltm.destroy(); if (env->i->data_dir) toku_free(env->i->data_dir); @@ -2901,7 +2901,13 @@ env_dbremove(DB_ENV * env, DB_TXN *txn, const char *fname, const char *dbname, u r = toku_db_create(&db, env, 0); lazy_assert_zero(r); r = toku_db_open_iname(db, txn, iname, 0, 0); - lazy_assert_zero(r); + if (txn && r) { + if (r == EMFILE || r == ENFILE) + r = toku_ydb_do_error(env, r, "toku dbremove failed because open file limit reached\n"); + else + r = toku_ydb_do_error(env, r, "toku dbremove failed\n"); + goto exit; + } if (txn) { // Now that we have a writelock on dname, verify that there are still no handles open. (to prevent race conditions) if (env_is_db_with_dname_open(env, dname)) { diff --git a/storage/tokudb/ft-index/src/ydb_db.cc b/storage/tokudb/ft-index/src/ydb_db.cc index 78e08705ac6..b9fa32eb4a0 100644 --- a/storage/tokudb/ft-index/src/ydb_db.cc +++ b/storage/tokudb/ft-index/src/ydb_db.cc @@ -1221,36 +1221,14 @@ load_inames(DB_ENV * env, DB_TXN * txn, int N, DB * dbs[/*N*/], const char * new int locked_load_inames(DB_ENV * env, DB_TXN * txn, int N, DB * dbs[/*N*/], char * new_inames_in_env[/*N*/], LSN *load_lsn, bool mark_as_loader) { - int ret, r; + int r; HANDLE_READ_ONLY_TXN(txn); - DB_TXN *child_txn = NULL; - int using_txns = env->i->open_flags & DB_INIT_TXN; - if (using_txns) { - ret = toku_txn_begin(env, txn, &child_txn, 0); - invariant_zero(ret); - } - // cannot begin a checkpoint toku_multi_operation_client_lock(); - r = load_inames(env, child_txn, N, dbs, (const char **) new_inames_in_env, load_lsn, mark_as_loader); + r = load_inames(env, txn, N, dbs, (const char **) new_inames_in_env, load_lsn, mark_as_loader); 
toku_multi_operation_client_unlock(); - if (using_txns) { - if (r == 0) { - ret = locked_txn_commit(child_txn, DB_TXN_NOSYNC); - invariant_zero(ret); - } else { - ret = locked_txn_abort(child_txn); - invariant_zero(ret); - for (int i = 0; i < N; i++) { - if (new_inames_in_env[i]) { - toku_free(new_inames_in_env[i]); - new_inames_in_env[i] = NULL; - } - } - } - } return r; } diff --git a/storage/tokudb/ft-index/src/ydb_write.cc b/storage/tokudb/ft-index/src/ydb_write.cc index 4826e418ab5..82fbf439885 100644 --- a/storage/tokudb/ft-index/src/ydb_write.cc +++ b/storage/tokudb/ft-index/src/ydb_write.cc @@ -253,6 +253,30 @@ toku_db_del(DB *db, DB_TXN *txn, DBT *key, uint32_t flags, bool holds_mo_lock) { return r; } +static int +db_put(DB *db, DB_TXN *txn, DBT *key, DBT *val, int flags, bool do_log) { + int r = 0; + bool unique = false; + enum ft_msg_type type = FT_INSERT; + if (flags == DB_NOOVERWRITE) { + unique = true; + } else if (flags == DB_NOOVERWRITE_NO_ERROR) { + type = FT_INSERT_NO_OVERWRITE; + } else if (flags != 0) { + // All other non-zero flags are unsupported + r = EINVAL; + } + if (r == 0) { + TOKUTXN ttxn = txn ? db_txn_struct_i(txn)->tokutxn : nullptr; + if (unique) { + r = toku_ft_insert_unique(db->i->ft_handle, key, val, ttxn, do_log); + } else { + toku_ft_maybe_insert(db->i->ft_handle, key, val, ttxn, false, ZERO_LSN, do_log, type); + } + invariant(r == DB_KEYEXIST || r == 0); + } + return r; +} int toku_db_put(DB *db, DB_TXN *txn, DBT *key, DBT *val, uint32_t flags, bool holds_mo_lock) { @@ -265,25 +289,16 @@ toku_db_put(DB *db, DB_TXN *txn, DBT *key, DBT *val, uint32_t flags, bool holds_ flags &= ~lock_flags; r = db_put_check_size_constraints(db, key, val); - if (r == 0) { - //Do any checking required by the flags. - r = db_put_check_overwrite_constraint(db, txn, key, lock_flags, flags); - } - //Do locking if necessary. 
Do not grab the lock again if this DB had a unique - //check performed because the lock was already grabbed by its cursor callback. + + //Do locking if necessary. bool do_locking = (bool)(db->i->lt && !(lock_flags&DB_PRELOCKED_WRITE)); - if (r == 0 && do_locking && !(flags & DB_NOOVERWRITE)) { + if (r == 0 && do_locking) { r = toku_db_get_point_write_lock(db, txn, key); } if (r == 0) { //Insert into the ft. - TOKUTXN ttxn = txn ? db_txn_struct_i(txn)->tokutxn : NULL; - enum ft_msg_type type = FT_INSERT; - if (flags==DB_NOOVERWRITE_NO_ERROR) { - type = FT_INSERT_NO_OVERWRITE; - } if (!holds_mo_lock) toku_multi_operation_client_lock(); - toku_ft_maybe_insert(db->i->ft_handle, key, val, ttxn, false, ZERO_LSN, true, type); + r = db_put(db, txn, key, val, flags, true); if (!holds_mo_lock) toku_multi_operation_client_unlock(); } @@ -635,9 +650,11 @@ log_put_multiple(DB_TXN *txn, DB *src_db, const DBT *src_key, const DBT *src_val } } +// Requires: If remaining_flags is non-null, this function performs any required uniqueness checks +// Otherwise, the caller is responsible. static int -do_put_multiple(DB_TXN *txn, uint32_t num_dbs, DB *db_array[], DBT_ARRAY keys[], DBT_ARRAY vals[], DB *src_db, const DBT *src_key, bool indexer_shortcut) { - TOKUTXN ttxn = db_txn_struct_i(txn)->tokutxn; +do_put_multiple(DB_TXN *txn, uint32_t num_dbs, DB *db_array[], DBT_ARRAY keys[], DBT_ARRAY vals[], uint32_t *remaining_flags, DB *src_db, const DBT *src_key, bool indexer_shortcut) { + int r = 0; for (uint32_t which_db = 0; which_db < num_dbs; which_db++) { DB *db = db_array[which_db]; @@ -666,16 +683,21 @@ do_put_multiple(DB_TXN *txn, uint32_t num_dbs, DB *db_array[], DBT_ARRAY keys[], } if (do_put) { for (uint32_t i = 0; i < keys[which_db].size; i++) { - // if db is being indexed by an indexer, then put into that db if the src key is to the left or equal to the - // indexers cursor. we have to get the src_db from the indexer and find it in the db_array. 
- toku_ft_maybe_insert(db->i->ft_handle, - &keys[which_db].dbts[i], &vals[which_db].dbts[i], - ttxn, false, ZERO_LSN, false, FT_INSERT); + int flags = 0; + if (remaining_flags != nullptr) { + flags = remaining_flags[which_db]; + invariant(!(flags & DB_NOOVERWRITE_NO_ERROR)); + } + r = db_put(db, txn, &keys[which_db].dbts[i], &vals[which_db].dbts[i], flags, false); + if (r != 0) { + goto done; + } } } } } - return 0; +done: + return r; } static int @@ -754,20 +776,14 @@ env_put_multiple_internal( r = db_put_check_size_constraints(db, &put_key, &put_val); if (r != 0) goto cleanup; - //Check overwrite constraints - r = db_put_check_overwrite_constraint(db, txn, - &put_key, - lock_flags[which_db], remaining_flags[which_db]); - if (r != 0) goto cleanup; if (remaining_flags[which_db] == DB_NOOVERWRITE_NO_ERROR) { //put_multiple does not support delaying the no error, since we would //have to log the flag in the put_multiple. r = EINVAL; goto cleanup; } - //Do locking if necessary. Do not grab the lock again if this DB had a unique - //check performed because the lock was already grabbed by its cursor callback. - if (db->i->lt && !(lock_flags[which_db] & DB_PRELOCKED_WRITE) && !(remaining_flags[which_db] & DB_NOOVERWRITE)) { + //Do locking if necessary. 
+ if (db->i->lt && !(lock_flags[which_db] & DB_PRELOCKED_WRITE)) { //Needs locking r = toku_db_get_point_write_lock(db, txn, &put_key); if (r != 0) goto cleanup; @@ -790,8 +806,10 @@ env_put_multiple_internal( } } toku_multi_operation_client_lock(); - log_put_multiple(txn, src_db, src_key, src_val, num_dbs, fts); - r = do_put_multiple(txn, num_dbs, db_array, put_keys, put_vals, src_db, src_key, indexer_shortcut); + r = do_put_multiple(txn, num_dbs, db_array, put_keys, put_vals, remaining_flags, src_db, src_key, indexer_shortcut); + if (r == 0) { + log_put_multiple(txn, src_db, src_key, src_val, num_dbs, fts); + } toku_multi_operation_client_unlock(); if (indexer_lock_taken) { toku_indexer_unlock(indexer); @@ -1075,7 +1093,7 @@ env_update_multiple(DB_ENV *env, DB *src_db, DB_TXN *txn, // recovery so we don't end up losing data. // So unlike env->put_multiple, we ONLY log a 'put_multiple' log entry. log_put_multiple(txn, src_db, new_src_key, new_src_data, n_put_dbs, put_fts); - r = do_put_multiple(txn, n_put_dbs, put_dbs, put_key_arrays, put_val_arrays, src_db, new_src_key, indexer_shortcut); + r = do_put_multiple(txn, n_put_dbs, put_dbs, put_key_arrays, put_val_arrays, nullptr, src_db, new_src_key, indexer_shortcut); } toku_multi_operation_client_unlock(); if (indexer_lock_taken) { diff --git a/storage/tokudb/ft-index/util/omt.cc b/storage/tokudb/ft-index/util/omt.cc index 92cda38aefe..709c7eab4c3 100644 --- a/storage/tokudb/ft-index/util/omt.cc +++ b/storage/tokudb/ft-index/util/omt.cc @@ -207,6 +207,9 @@ void omt<omtdata_t, omtdataout_t, supports_marks>::clone(const omt &src) { src.fill_array_with_subtree_values(&this->d.a.values[0], src.d.t.root); } this->d.a.num_values = src.size(); + if (supports_marks) { + this->convert_to_tree(); + } } template<typename omtdata_t, typename omtdataout_t, bool supports_marks> diff --git a/storage/tokudb/ft-index/util/tests/threadpool-nproc-limit.cc b/storage/tokudb/ft-index/util/tests/threadpool-nproc-limit.cc new file mode 
100644 index 00000000000..f1ba10dad84 --- /dev/null +++ b/storage/tokudb/ft-index/util/tests/threadpool-nproc-limit.cc @@ -0,0 +1,171 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +#ident "$Id$" +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuDB, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2013 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 
11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. 
+*/ + +#ident "Copyright (c) 2014 Tokutek Inc. All rights reserved." + +// this test verifies that the toku thread pool is resilient when hitting the nproc limit. + +#include <util/threadpool.h> +#include <stdio.h> +#include <stdlib.h> +#include <assert.h> +#include <string.h> +#include <unistd.h> +#include <errno.h> +#include <sys/resource.h> + +int verbose = 0; + +static int usage(void) { + fprintf(stderr, "[-q] [-v] [--verbose] (%d)\n", verbose); + return 1; +} + +static void *f(void *arg) { + return arg; +} + +static int dotest(int the_limit) { + if (verbose) + fprintf(stderr, "%s:%u %d\n", __FILE__, __LINE__, the_limit); + int r; + struct toku_thread_pool *pool = nullptr; + r = toku_thread_pool_create(&pool, 10); + assert(r == 0 && pool != nullptr); + + struct rlimit current_nproc_limit; + r = getrlimit(RLIMIT_NPROC, ¤t_nproc_limit); + assert(r == 0); + + struct rlimit new_nproc_limit = current_nproc_limit; + new_nproc_limit.rlim_cur = the_limit; + r = setrlimit(RLIMIT_NPROC, &new_nproc_limit); + assert(r == 0); + + int want_n = 20; + int got_n = want_n; + r = toku_thread_pool_run(pool, 0, &got_n, f, nullptr); + if (r == 0) + assert(want_n == got_n); + else { + assert(r == EWOULDBLOCK); + assert(got_n <= want_n); + } + + r = setrlimit(RLIMIT_NPROC, ¤t_nproc_limit); + assert(r == 0); + + if (verbose) + toku_thread_pool_print(pool, stderr); + toku_thread_pool_destroy(&pool); + return got_n > 0; +} + +int main(int argc, char *argv[]) { + // parse args + for (int i = 1; i < argc; i++) { + char *arg = argv[i]; + if (arg[0] != '-') + break; + if (strcmp(arg, "-v") == 0 || strcmp(arg, "--verbose") == 0) { + verbose = verbose+1; + continue; + } + if (strcmp(arg, "-q") == 0) { + verbose = verbose > 0 ? 
verbose-1 : 0; + continue; + } + return usage(); + } + // set increasing nproc limits until the test succeeds in hitting the limit after > 0 threads are created + for (int i = 0; 1; i++) { + if (dotest(i)) + break; + } + return 0; +} diff --git a/storage/tokudb/ft-index/util/threadpool.cc b/storage/tokudb/ft-index/util/threadpool.cc index d6652b7a71c..4f1105d83c2 100644 --- a/storage/tokudb/ft-index/util/threadpool.cc +++ b/storage/tokudb/ft-index/util/threadpool.cc @@ -132,13 +132,18 @@ static int toku_thread_create(struct toku_thread_pool *pool, struct toku_thread **toku_thread_return) { int r; struct toku_thread *MALLOC(thread); - if (thread == NULL) { + if (thread == nullptr) { r = get_error_errno(); } else { memset(thread, 0, sizeof *thread); thread->pool = pool; - toku_cond_init(&thread->wait, NULL); - r = toku_pthread_create(&thread->tid, NULL, toku_thread_run_internal, thread); resource_assert_zero(r); + toku_cond_init(&thread->wait, nullptr); + r = toku_pthread_create(&thread->tid, nullptr, toku_thread_run_internal, thread); + if (r) { + toku_cond_destroy(&thread->wait); + toku_free(thread); + thread = nullptr; + } *toku_thread_return = thread; } return r; @@ -192,7 +197,7 @@ toku_thread_run_internal(void *arg) { if (doexit) break; toku_thread_pool_lock(pool); - thread->f = NULL; + thread->f = nullptr; toku_list_push(&pool->free_threads, &thread->free_link); } return arg; @@ -202,13 +207,13 @@ int toku_thread_pool_create(struct toku_thread_pool **pool_return, int max_threads) { int r; struct toku_thread_pool *CALLOC(pool); - if (pool == NULL) { + if (pool == nullptr) { r = get_error_errno(); } else { - toku_mutex_init(&pool->lock, NULL); + toku_mutex_init(&pool->lock, nullptr); toku_list_init(&pool->free_threads); toku_list_init(&pool->all_threads); - toku_cond_init(&pool->wait_free, NULL); + toku_cond_init(&pool->wait_free, nullptr); pool->cur_threads = 0; pool->max_threads = max_threads; *pool_return = pool; @@ -230,7 +235,7 @@ 
toku_thread_pool_unlock(struct toku_thread_pool *pool) { void toku_thread_pool_destroy(struct toku_thread_pool **poolptr) { struct toku_thread_pool *pool = *poolptr; - *poolptr = NULL; + *poolptr = nullptr; // ask the threads to exit toku_thread_pool_lock(pool); @@ -260,7 +265,7 @@ toku_thread_pool_destroy(struct toku_thread_pool **poolptr) { static int toku_thread_pool_add(struct toku_thread_pool *pool) { - struct toku_thread *thread = NULL; + struct toku_thread *thread = nullptr; int r = toku_thread_create(pool, &thread); if (r == 0) { pool->cur_threads += 1; @@ -294,7 +299,7 @@ toku_thread_pool_get_one(struct toku_thread_pool *pool, int dowait, struct toku_ struct toku_thread *thread = toku_list_struct(list, struct toku_thread, free_link); *toku_thread_return = thread; } else - *toku_thread_return = NULL; + *toku_thread_return = nullptr; toku_thread_pool_unlock(pool); return r; } diff --git a/storage/tokudb/ha_tokudb.cc b/storage/tokudb/ha_tokudb.cc index 9729b0ecf40..afcf93a2adb 100644 --- a/storage/tokudb/ha_tokudb.cc +++ b/storage/tokudb/ha_tokudb.cc @@ -120,14 +120,6 @@ extern "C" { #include "hatoku_defines.h" #include "hatoku_cmp.h" -static inline void *thd_data_get(THD *thd, int slot) { - return thd->ha_data[slot].ha_ptr; -} - -static inline void thd_data_set(THD *thd, int slot, void *data) { - thd->ha_data[slot].ha_ptr = data; -} - static inline uint get_key_parts(const KEY *key); #undef PACKAGE @@ -509,7 +501,6 @@ typedef struct index_read_info { DBT* orig_key; } *INDEX_READ_INFO; - static int ai_poll_fun(void *extra, float progress) { LOADER_CONTEXT context = (LOADER_CONTEXT)extra; if (context->thd->killed) { @@ -1048,8 +1039,7 @@ static uchar* pack_toku_field_blob( static int create_tokudb_trx_data_instance(tokudb_trx_data** out_trx) { int error; - tokudb_trx_data* trx = NULL; - trx = (tokudb_trx_data *) tokudb_my_malloc(sizeof(*trx), MYF(MY_ZEROFILL)); + tokudb_trx_data* trx = (tokudb_trx_data *) tokudb_my_malloc(sizeof(*trx), MYF(MY_ZEROFILL)); if 
(!trx) { error = ENOMEM; goto cleanup; @@ -1646,8 +1636,7 @@ int ha_tokudb::initialize_share( DB_TXN* txn = NULL; bool do_commit = false; THD* thd = ha_thd(); - tokudb_trx_data *trx = NULL; - trx = (tokudb_trx_data *) thd_data_get(ha_thd(), tokudb_hton->slot); + tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(ha_thd(), tokudb_hton); if (thd_sql_command(thd) == SQLCOM_CREATE_TABLE && trx && trx->sub_sp_level) { txn = trx->sub_sp_level; } @@ -1759,7 +1748,7 @@ int ha_tokudb::initialize_share( } share->ref_length = ref_length; - error = estimate_num_rows(share->file,&num_rows, txn); + error = estimate_num_rows(share->file, &num_rows, txn); // // estimate_num_rows should not fail under normal conditions // @@ -1969,7 +1958,6 @@ exit: // int ha_tokudb::estimate_num_rows(DB* db, uint64_t* num_rows, DB_TXN* txn) { int error = ENOSYS; - DBC* crsr = NULL; bool do_commit = false; DB_BTREE_STAT64 dict_stats; DB_TXN* txn_to_use = NULL; @@ -1983,21 +1971,12 @@ int ha_tokudb::estimate_num_rows(DB* db, uint64_t* num_rows, DB_TXN* txn) { txn_to_use = txn; } - error = db->stat64( - share->file, - txn_to_use, - &dict_stats - ); + error = db->stat64(db, txn_to_use, &dict_stats); if (error) { goto cleanup; } *num_rows = dict_stats.bt_ndata; error = 0; cleanup: - if (crsr != NULL) { - int r = crsr->c_close(crsr); - assert(r==0); - crsr = NULL; - } if (do_commit) { commit_txn(txn_to_use, 0); txn_to_use = NULL; @@ -3303,7 +3282,7 @@ void ha_tokudb::start_bulk_insert(ha_rows rows) { TOKUDB_HANDLER_DBUG_ENTER("%llu txn %p", (unsigned long long) rows, transaction); #endif THD* thd = ha_thd(); - tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot); + tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); delay_updating_ai_metadata = true; ai_metadata_update_required = false; abort_loader = false; @@ -3313,7 +3292,7 @@ void ha_tokudb::start_bulk_insert(ha_rows rows) { num_DBs_locked_in_bulk = true; lock_count = 0; - if 
(share->try_table_lock) { + if ((rows == 0 || rows > 1) && share->try_table_lock) { if (get_prelock_empty(thd) && may_table_be_empty(transaction)) { if (using_ignore || is_insert_ignore(thd) || thd->lex->duplicates != DUP_ERROR || table->s->next_number_key_offset) { @@ -3372,7 +3351,7 @@ int ha_tokudb::end_bulk_insert(bool abort) { TOKUDB_HANDLER_DBUG_ENTER(""); int error = 0; THD* thd = ha_thd(); - tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot); + tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); bool using_loader = (loader != NULL); if (ai_metadata_update_required) { tokudb_pthread_mutex_lock(&share->mutex); @@ -3386,10 +3365,10 @@ int ha_tokudb::end_bulk_insert(bool abort) { if (loader) { if (!abort_loader && !thd->killed) { DBUG_EXECUTE_IF("tokudb_end_bulk_insert_sleep", { - const char *old_proc_info = tokudb_thd_get_proc_info(thd); + const char *orig_proc_info = tokudb_thd_get_proc_info(thd); thd_proc_info(thd, "DBUG sleep"); my_sleep(20000000); - thd_proc_info(thd, old_proc_info); + thd_proc_info(thd, orig_proc_info); }); error = loader->close(loader); loader = NULL; @@ -3406,12 +3385,8 @@ int ha_tokudb::end_bulk_insert(bool abort) { if (i == primary_key && !share->pk_has_string) { continue; } - error = is_index_unique( - &is_unique, - transaction, - share->key_file[i], - &table->key_info[i] - ); + error = is_index_unique(&is_unique, transaction, share->key_file[i], &table->key_info[i], + DB_PRELOCKED_WRITE); if (error) goto cleanup; if (!is_unique) { error = HA_ERR_FOUND_DUPP_KEY; @@ -3451,6 +3426,7 @@ cleanup: } } trx->stmt_progress.using_loader = false; + thd_proc_info(thd, 0); TOKUDB_HANDLER_DBUG_RETURN(error ? 
error : loader_error); } @@ -3458,7 +3434,7 @@ int ha_tokudb::end_bulk_insert() { return end_bulk_insert( false ); } -int ha_tokudb::is_index_unique(bool* is_unique, DB_TXN* txn, DB* db, KEY* key_info) { +int ha_tokudb::is_index_unique(bool* is_unique, DB_TXN* txn, DB* db, KEY* key_info, int lock_flags) { int error; DBC* tmp_cursor1 = NULL; DBC* tmp_cursor2 = NULL; @@ -3466,7 +3442,7 @@ int ha_tokudb::is_index_unique(bool* is_unique, DB_TXN* txn, DB* db, KEY* key_in uint64_t cnt = 0; char status_msg[MAX_ALIAS_NAME + 200]; //buffer of 200 should be a good upper bound. THD* thd = ha_thd(); - const char *old_proc_info = tokudb_thd_get_proc_info(thd); + const char *orig_proc_info = tokudb_thd_get_proc_info(thd); memset(&key1, 0, sizeof(key1)); memset(&key2, 0, sizeof(key2)); memset(&val, 0, sizeof(val)); @@ -3474,49 +3450,23 @@ int ha_tokudb::is_index_unique(bool* is_unique, DB_TXN* txn, DB* db, KEY* key_in memset(&packed_key2, 0, sizeof(packed_key2)); *is_unique = true; - error = db->cursor( - db, - txn, - &tmp_cursor1, - DB_SERIALIZABLE - ); + error = db->cursor(db, txn, &tmp_cursor1, DB_SERIALIZABLE); if (error) { goto cleanup; } - error = db->cursor( - db, - txn, - &tmp_cursor2, - DB_SERIALIZABLE - ); + error = db->cursor(db, txn, &tmp_cursor2, DB_SERIALIZABLE); if (error) { goto cleanup; } - - error = tmp_cursor1->c_get( - tmp_cursor1, - &key1, - &val, - DB_NEXT - ); + error = tmp_cursor1->c_get(tmp_cursor1, &key1, &val, DB_NEXT + lock_flags); if (error == DB_NOTFOUND) { *is_unique = true; error = 0; goto cleanup; } else if (error) { goto cleanup; } - error = tmp_cursor2->c_get( - tmp_cursor2, - &key2, - &val, - DB_NEXT - ); + error = tmp_cursor2->c_get(tmp_cursor2, &key2, &val, DB_NEXT + lock_flags); if (error) { goto cleanup; } - error = tmp_cursor2->c_get( - tmp_cursor2, - &key2, - &val, - DB_NEXT - ); + error = tmp_cursor2->c_get(tmp_cursor2, &key2, &val, DB_NEXT + lock_flags); if (error == DB_NOTFOUND) { *is_unique = true; error = 0; @@ -3528,59 +3478,25 @@ 
int ha_tokudb::is_index_unique(bool* is_unique, DB_TXN* txn, DB* db, KEY* key_in bool has_null1; bool has_null2; int cmp; - place_key_into_mysql_buff( - key_info, - table->record[0], - (uchar *) key1.data + 1 - ); - place_key_into_mysql_buff( - key_info, - table->record[1], - (uchar *) key2.data + 1 - ); + place_key_into_mysql_buff(key_info, table->record[0], (uchar *) key1.data + 1); + place_key_into_mysql_buff(key_info, table->record[1], (uchar *) key2.data + 1); - create_dbt_key_for_lookup( - &packed_key1, - key_info, - key_buff, - table->record[0], - &has_null1 - ); - create_dbt_key_for_lookup( - &packed_key2, - key_info, - key_buff2, - table->record[1], - &has_null2 - ); + create_dbt_key_for_lookup(&packed_key1, key_info, key_buff, table->record[0], &has_null1); + create_dbt_key_for_lookup(&packed_key2, key_info, key_buff2, table->record[1], &has_null2); if (!has_null1 && !has_null2) { cmp = tokudb_prefix_cmp_dbt_key(db, &packed_key1, &packed_key2); if (cmp == 0) { memcpy(key_buff, key1.data, key1.size); - place_key_into_mysql_buff( - key_info, - table->record[0], - (uchar *) key_buff + 1 - ); + place_key_into_mysql_buff(key_info, table->record[0], (uchar *) key_buff + 1); *is_unique = false; break; } } - error = tmp_cursor1->c_get( - tmp_cursor1, - &key1, - &val, - DB_NEXT - ); + error = tmp_cursor1->c_get(tmp_cursor1, &key1, &val, DB_NEXT + lock_flags); if (error) { goto cleanup; } - error = tmp_cursor2->c_get( - tmp_cursor2, - &key2, - &val, - DB_NEXT - ); + error = tmp_cursor2->c_get(tmp_cursor2, &key2, &val, DB_NEXT + lock_flags); if (error && (error != DB_NOTFOUND)) { goto cleanup; } cnt++; @@ -3603,7 +3519,7 @@ int ha_tokudb::is_index_unique(bool* is_unique, DB_TXN* txn, DB* db, KEY* key_in error = 0; cleanup: - thd_proc_info(thd, old_proc_info); + thd_proc_info(thd, orig_proc_info); if (tmp_cursor1) { tmp_cursor1->c_close(tmp_cursor1); tmp_cursor1 = NULL; @@ -4104,7 +4020,7 @@ int ha_tokudb::write_row(uchar * record) { } } - trx = (tokudb_trx_data *) 
thd_data_get(thd, tokudb_hton->slot); + trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); if (!error) { added_rows++; trx->stmt_progress.inserted++; @@ -4174,7 +4090,7 @@ int ha_tokudb::update_row(const uchar * old_row, uchar * new_row) { THD* thd = ha_thd(); DB_TXN* sub_trans = NULL; DB_TXN* txn = NULL; - tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot); + tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); uint curr_num_DBs; LINT_INIT(error); @@ -4348,7 +4264,7 @@ int ha_tokudb::delete_row(const uchar * record) { bool has_null; THD* thd = ha_thd(); uint curr_num_DBs; - tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);; + tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);; ha_statistic_increment(&SSV::ha_delete_count); @@ -4915,7 +4831,7 @@ int ha_tokudb::index_read(uchar * buf, const uchar * key, uint key_len, enum ha_ int error = 0; uint32_t flags = 0; THD* thd = ha_thd(); - tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);; + tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);; struct smart_dbt_info info; struct index_read_info ir_info; @@ -5393,7 +5309,7 @@ int ha_tokudb::get_next(uchar* buf, int direction, DBT* key_to_compare, bool do_ int error = 0; uint32_t flags = SET_PRELOCK_FLAG(0); THD* thd = ha_thd(); - tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);; + tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);; bool need_val; HANDLE_INVALID_CURSOR(); @@ -5546,7 +5462,7 @@ int ha_tokudb::index_first(uchar * buf) { struct smart_dbt_info info; uint32_t flags = SET_PRELOCK_FLAG(0); THD* thd = ha_thd(); - tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);; + tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);; HANDLE_INVALID_CURSOR(); 
ha_statistic_increment(&SSV::ha_read_first_count); @@ -5589,7 +5505,7 @@ int ha_tokudb::index_last(uchar * buf) { struct smart_dbt_info info; uint32_t flags = SET_PRELOCK_FLAG(0); THD* thd = ha_thd(); - tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);; + tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);; HANDLE_INVALID_CURSOR(); ha_statistic_increment(&SSV::ha_read_last_count); @@ -5680,7 +5596,7 @@ int ha_tokudb::rnd_next(uchar * buf) { void ha_tokudb::track_progress(THD* thd) { - tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot); + tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); if (trx) { ulonglong num_written = trx->stmt_progress.inserted + trx->stmt_progress.updated + trx->stmt_progress.deleted; bool update_status = @@ -6270,12 +6186,11 @@ int ha_tokudb::external_lock(THD * thd, int lock_type) { } int error = 0; - tokudb_trx_data *trx = NULL; - trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot); + tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); if (!trx) { error = create_tokudb_trx_data_instance(&trx); if (error) { goto cleanup; } - thd_data_set(thd, tokudb_hton->slot, trx); + thd_set_ha_data(thd, tokudb_hton, trx); } if (trx->all == NULL) { trx->sp_level = NULL; @@ -6349,7 +6264,7 @@ int ha_tokudb::start_stmt(THD * thd, thr_lock_type lock_type) { TOKUDB_HANDLER_TRACE("q %s", thd->query()); int error = 0; - tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot); + tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); DBUG_ASSERT(trx); /* @@ -6449,7 +6364,7 @@ uint32_t ha_tokudb::get_cursor_isolation_flags(enum thr_lock_type lock_type, THD lock (if we don't want to use MySQL table locks at all) or add locks for many tables (like we do when we are using a MERGE handler). 
- Tokudb DB changes all WRITE locks to TL_WRITE_ALLOW_WRITE (which + TokuDB changes all WRITE locks to TL_WRITE_ALLOW_WRITE (which signals that we are doing WRITES, but we are still allowing other reader's and writer's. @@ -6471,34 +6386,25 @@ THR_LOCK_DATA **ha_tokudb::store_lock(THD * thd, THR_LOCK_DATA ** to, enum thr_l } if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK) { - // if creating a hot index - if (thd_sql_command(thd)== SQLCOM_CREATE_INDEX && get_create_index_online(thd)) { - rw_rdlock(&share->num_DBs_lock); - if (share->num_DBs == (table->s->keys + tokudb_test(hidden_primary_key))) { - lock_type = TL_WRITE_ALLOW_WRITE; - } - lock.type = lock_type; - rw_unlock(&share->num_DBs_lock); - } - - // 5.5 supports reads concurrent with alter table. just use the default lock type. -#if MYSQL_VERSION_ID < 50500 - else if (thd_sql_command(thd)== SQLCOM_CREATE_INDEX || - thd_sql_command(thd)== SQLCOM_ALTER_TABLE || - thd_sql_command(thd)== SQLCOM_DROP_INDEX) { - // force alter table to lock out other readers - lock_type = TL_WRITE; - lock.type = lock_type; - } -#endif - else { - // If we are not doing a LOCK TABLE, then allow multiple writers - if ((lock_type >= TL_WRITE_CONCURRENT_INSERT && lock_type <= TL_WRITE) && - !thd->in_lock_tables && thd_sql_command(thd) != SQLCOM_TRUNCATE && !thd_tablespace_op(thd)) { + enum_sql_command sql_command = (enum_sql_command) thd_sql_command(thd); + if (!thd->in_lock_tables) { + if (sql_command == SQLCOM_CREATE_INDEX && get_create_index_online(thd)) { + // hot indexing + rw_rdlock(&share->num_DBs_lock); + if (share->num_DBs == (table->s->keys + tokudb_test(hidden_primary_key))) { + lock_type = TL_WRITE_ALLOW_WRITE; + } + rw_unlock(&share->num_DBs_lock); + } else if ((lock_type >= TL_WRITE_CONCURRENT_INSERT && lock_type <= TL_WRITE) && + sql_command != SQLCOM_TRUNCATE && !thd_tablespace_op(thd)) { + // allow concurrent writes lock_type = TL_WRITE_ALLOW_WRITE; + } else if (sql_command == SQLCOM_OPTIMIZE && lock_type == 
TL_READ_NO_INSERT) { + // hot optimize table + lock_type = TL_READ; } - lock.type = lock_type; } + lock.type = lock_type; } *to++ = &lock; if (tokudb_debug & TOKUDB_DEBUG_LOCK) @@ -6954,7 +6860,7 @@ int ha_tokudb::create(const char *name, TABLE * form, HA_CREATE_INFO * create_in newname = (char *)tokudb_my_malloc(get_max_dict_name_path_length(name),MYF(MY_WME)); if (newname == NULL){ error = ENOMEM; goto cleanup;} - trx = (tokudb_trx_data *) thd_data_get(ha_thd(), tokudb_hton->slot); + trx = (tokudb_trx_data *) thd_get_ha_data(ha_thd(), tokudb_hton); if (trx && trx->sub_sp_level && thd_sql_command(thd) == SQLCOM_CREATE_TABLE) { txn = trx->sub_sp_level; } @@ -7144,7 +7050,7 @@ int ha_tokudb::delete_or_rename_table (const char* from_name, const char* to_nam DB_TXN *parent_txn = NULL; tokudb_trx_data *trx = NULL; - trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot); + trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); if (thd_sql_command(ha_thd()) == SQLCOM_CREATE_TABLE && trx && trx->sub_sp_level) { parent_txn = trx->sub_sp_level; } @@ -7585,7 +7491,7 @@ int ha_tokudb::tokudb_add_index( DBC* tmp_cursor = NULL; int cursor_ret_val = 0; DBT curr_pk_key, curr_pk_val; - THD* thd = ha_thd(); + THD* thd = ha_thd(); DB_LOADER* loader = NULL; DB_INDEXER* indexer = NULL; bool loader_save_space = get_load_save_space(thd); @@ -7623,7 +7529,7 @@ int ha_tokudb::tokudb_add_index( // // status message to be shown in "show process list" // - const char *old_proc_info = tokudb_thd_get_proc_info(thd); + const char *orig_proc_info = tokudb_thd_get_proc_info(thd); char status_msg[MAX_ALIAS_NAME + 200]; //buffer of 200 should be a good upper bound. 
ulonglong num_processed = 0; //variable that stores number of elements inserted thus far thd_proc_info(thd, "Adding indexes"); @@ -7849,7 +7755,8 @@ int ha_tokudb::tokudb_add_index( num_processed++; if ((num_processed % 1000) == 0) { - sprintf(status_msg, "Adding indexes: Fetched %llu of about %llu rows, loading of data still remains.", num_processed, (long long unsigned) share->rows); + sprintf(status_msg, "Adding indexes: Fetched %llu of about %llu rows, loading of data still remains.", + num_processed, (long long unsigned) share->rows); thd_proc_info(thd, status_msg); #ifdef HA_TOKUDB_HAS_THD_PROGRESS @@ -7881,12 +7788,8 @@ int ha_tokudb::tokudb_add_index( for (uint i = 0; i < num_of_keys; i++, curr_index++) { if (key_info[i].flags & HA_NOSAME) { bool is_unique; - error = is_index_unique( - &is_unique, - txn, - share->key_file[curr_index], - &key_info[i] - ); + error = is_index_unique(&is_unique, txn, share->key_file[curr_index], &key_info[i], + creating_hot_index ? 0 : DB_PRELOCKED_WRITE); if (error) goto cleanup; if (!is_unique) { error = HA_ERR_FOUND_DUPP_KEY; @@ -7944,7 +7847,7 @@ cleanup: another transaction has accessed the table. \ To add indexes, make sure no transactions touch the table.", share->table_name); } - thd_proc_info(thd, old_proc_info); + thd_proc_info(thd, orig_proc_info); TOKUDB_HANDLER_DBUG_RETURN(error ? 
error : loader_error); } @@ -8296,12 +8199,12 @@ void ha_tokudb::cleanup_txn(DB_TXN *txn) { } void ha_tokudb::add_to_trx_handler_list() { - tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(ha_thd(), tokudb_hton->slot); + tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(ha_thd(), tokudb_hton); trx->handlers = list_add(trx->handlers, &trx_handler_list); } void ha_tokudb::remove_from_trx_handler_list() { - tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(ha_thd(), tokudb_hton->slot); + tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(ha_thd(), tokudb_hton); trx->handlers = list_delete(trx->handlers, &trx_handler_list); } diff --git a/storage/tokudb/ha_tokudb.h b/storage/tokudb/ha_tokudb.h index 35ce601daf0..adc2e055174 100644 --- a/storage/tokudb/ha_tokudb.h +++ b/storage/tokudb/ha_tokudb.h @@ -109,15 +109,6 @@ typedef struct loader_context { ha_tokudb* ha; } *LOADER_CONTEXT; -typedef struct hot_optimize_context { - THD *thd; - char* write_status_msg; - ha_tokudb *ha; - uint progress_stage; - uint current_table; - uint num_tables; -} *HOT_OPTIMIZE_CONTEXT; - // // This object stores table information that is to be shared // among all ha_tokudb objects. 
@@ -475,7 +466,7 @@ private: ); int create_main_dictionary(const char* name, TABLE* form, DB_TXN* txn, KEY_AND_COL_INFO* kc_info, toku_compression_method compression_method); void trace_create_table_info(const char *name, TABLE * form); - int is_index_unique(bool* is_unique, DB_TXN* txn, DB* db, KEY* key_info); + int is_index_unique(bool* is_unique, DB_TXN* txn, DB* db, KEY* key_info, int lock_flags); int is_val_unique(bool* is_unique, uchar* record, KEY* key_info, uint dict_index, DB_TXN* txn); int do_uniqueness_checks(uchar* record, DB_TXN* txn, THD* thd); void set_main_dict_put_flags(THD* thd, bool opt_eligible, uint32_t* put_flags); @@ -809,6 +800,7 @@ private: void remove_from_trx_handler_list(); private: + int do_optimize(THD *thd); int map_to_handler_error(int error); }; diff --git a/storage/tokudb/ha_tokudb_admin.cc b/storage/tokudb/ha_tokudb_admin.cc index 8d202eeda41..100c88a76a8 100644 --- a/storage/tokudb/ha_tokudb_admin.cc +++ b/storage/tokudb/ha_tokudb_admin.cc @@ -128,8 +128,15 @@ static int analyze_progress(void *v_extra, uint64_t rows) { int ha_tokudb::analyze(THD *thd, HA_CHECK_OPT *check_opt) { TOKUDB_HANDLER_DBUG_ENTER("%s", share->table_name); + const char *orig_proc_info = tokudb_thd_get_proc_info(thd); uint64_t rec_per_key[table_share->key_parts]; int result = HA_ADMIN_OK; + + // stub out analyze if optimize is remapped to alter recreate + analyze + if (thd_sql_command(thd) != SQLCOM_ANALYZE) { + TOKUDB_HANDLER_DBUG_RETURN(result); + } + DB_TXN *txn = transaction; if (!txn) { result = HA_ADMIN_FAILED; @@ -168,9 +175,19 @@ int ha_tokudb::analyze(THD *thd, HA_CHECK_OPT *check_opt) { if (error) result = HA_ADMIN_FAILED; } + thd_proc_info(thd, orig_proc_info); TOKUDB_HANDLER_DBUG_RETURN(result); } +typedef struct hot_optimize_context { + THD *thd; + char* write_status_msg; + ha_tokudb *ha; + uint progress_stage; + uint current_table; + uint num_tables; +} *HOT_OPTIMIZE_CONTEXT; + static int hot_poll_fun(void *extra, float progress) { 
HOT_OPTIMIZE_CONTEXT context = (HOT_OPTIMIZE_CONTEXT)extra; if (context->thd->killed) { @@ -194,9 +211,9 @@ static int hot_poll_fun(void *extra, float progress) { } // flatten all DB's in this table, to do so, peform hot optimize on each db -int ha_tokudb::optimize(THD * thd, HA_CHECK_OPT * check_opt) { +int ha_tokudb::do_optimize(THD *thd) { TOKUDB_HANDLER_DBUG_ENTER("%s", share->table_name); - + const char *orig_proc_info = tokudb_thd_get_proc_info(thd); int error; uint curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key); @@ -206,9 +223,7 @@ int ha_tokudb::optimize(THD * thd, HA_CHECK_OPT * check_opt) { thd_progress_init(thd, curr_num_DBs); #endif - // // for each DB, run optimize and hot_optimize - // for (uint i = 0; i < curr_num_DBs; i++) { DB* db = share->key_file[i]; error = db->optimize(db); @@ -228,14 +243,24 @@ int ha_tokudb::optimize(THD * thd, HA_CHECK_OPT * check_opt) { goto cleanup; } } - error = 0; -cleanup: +cleanup: #ifdef HA_TOKUDB_HAS_THD_PROGRESS thd_progress_end(thd); #endif + thd_proc_info(thd, orig_proc_info); + TOKUDB_HANDLER_DBUG_RETURN(error); +} +int ha_tokudb::optimize(THD *thd, HA_CHECK_OPT *check_opt) { + TOKUDB_HANDLER_DBUG_ENTER("%s", share->table_name); + int error; +#if TOKU_OPTIMIZE_WITH_RECREATE + error = HA_ADMIN_TRY_ALTER; +#else + error = do_optimize(thd); +#endif TOKUDB_HANDLER_DBUG_RETURN(error); } @@ -266,10 +291,7 @@ static void ha_tokudb_check_info(THD *thd, TABLE *table, const char *msg) { int ha_tokudb::check(THD *thd, HA_CHECK_OPT *check_opt) { TOKUDB_HANDLER_DBUG_ENTER("%s", share->table_name); - - const char *old_proc_info = tokudb_thd_get_proc_info(thd); - thd_proc_info(thd, "tokudb::check"); - + const char *orig_proc_info = tokudb_thd_get_proc_info(thd); int result = HA_ADMIN_OK; int r; @@ -321,6 +343,6 @@ int ha_tokudb::check(THD *thd, HA_CHECK_OPT *check_opt) { } } } - thd_proc_info(thd, old_proc_info); + thd_proc_info(thd, orig_proc_info); TOKUDB_HANDLER_DBUG_RETURN(result); } diff --git 
a/storage/tokudb/ha_tokudb_alter_56.cc b/storage/tokudb/ha_tokudb_alter_56.cc index 93ce40850a7..5062a2ae67b 100644 --- a/storage/tokudb/ha_tokudb_alter_56.cc +++ b/storage/tokudb/ha_tokudb_alter_56.cc @@ -122,6 +122,7 @@ public: expand_varchar_update_needed(false), expand_fixed_update_needed(false), expand_blob_update_needed(false), + optimize_needed(false), table_kc_info(NULL), altered_table_kc_info(NULL) { } @@ -141,6 +142,7 @@ public: bool expand_varchar_update_needed; bool expand_fixed_update_needed; bool expand_blob_update_needed; + bool optimize_needed; Dynamic_array<uint> changed_fields; KEY_AND_COL_INFO *table_kc_info; KEY_AND_COL_INFO *altered_table_kc_info; @@ -219,8 +221,10 @@ static bool change_type_is_supported(TABLE *table, TABLE *altered_table, Alter_i static ulong fix_handler_flags(THD *thd, TABLE *table, TABLE *altered_table, Alter_inplace_info *ha_alter_info) { ulong handler_flags = ha_alter_info->handler_flags; +#if 100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100099 // This is automatically supported, hide the flag from later checks handler_flags &= ~Alter_inplace_info::ALTER_PARTITIONED; +#endif // workaround for fill_alter_inplace_info bug (#5193) // the function erroneously sets the ADD_INDEX and DROP_INDEX flags for a column addition that does not @@ -437,7 +441,13 @@ enum_alter_inplace_result ha_tokudb::check_if_supported_inplace_alter(TABLE *alt result = HA_ALTER_INPLACE_EXCLUSIVE_LOCK; } } + } +#if TOKU_OPTIMIZE_WITH_RECREATE + else if (only_flags(ctx->handler_flags, Alter_inplace_info::RECREATE_TABLE + Alter_inplace_info::ALTER_COLUMN_DEFAULT)) { + ctx->optimize_needed = true; + result = HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE; } +#endif if (result != HA_ALTER_INPLACE_NOT_SUPPORTED && table->s->null_bytes != altered_table->s->null_bytes && (tokudb_debug & TOKUDB_DEBUG_ALTER_TABLE)) { @@ -520,6 +530,9 @@ bool ha_tokudb::inplace_alter_table(TABLE *altered_table, Alter_inplace_info *ha if (error == 0 && ctx->reset_card) { error = 
tokudb::set_card_from_status(share->status_block, ctx->alter_txn, table->s, altered_table->s); } + if (error == 0 && ctx->optimize_needed) { + error = do_optimize(ha_thd()); + } #if (50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || \ (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799) @@ -756,7 +769,7 @@ bool ha_tokudb::commit_inplace_alter_table(TABLE *altered_table, Alter_inplace_i if (!commit) { // abort the alter transaction NOW so that any alters are rolled back. this allows the following restores to work. - tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot); + tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); assert(ctx->alter_txn == trx->stmt); assert(trx->tokudb_lock_count > 0); // for partitioned tables, we use a single transaction to do all of the partition changes. the tokudb_lock_count diff --git a/storage/tokudb/ha_tokudb_alter_common.cc b/storage/tokudb/ha_tokudb_alter_common.cc index ecef0fb7415..414e8280daf 100644 --- a/storage/tokudb/ha_tokudb_alter_common.cc +++ b/storage/tokudb/ha_tokudb_alter_common.cc @@ -814,7 +814,7 @@ int ha_tokudb::write_frm_data(const uchar *frm_data, size_t frm_len) { if (TOKU_PARTITION_WRITE_FRM_DATA || table->part_info == NULL) { // write frmdata to status THD *thd = ha_thd(); - tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot); + tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); assert(trx); DB_TXN *txn = trx->stmt; // use alter table transaction assert(txn); diff --git a/storage/tokudb/hatoku_defines.h b/storage/tokudb/hatoku_defines.h index 73e6addb529..d8d8fb479d6 100644 --- a/storage/tokudb/hatoku_defines.h +++ b/storage/tokudb/hatoku_defines.h @@ -96,6 +96,10 @@ PATENT RIGHTS GRANT: #pragma interface /* gcc class implementation */ #endif +#if !defined(TOKUDB_CHECK_JEMALLOC) +#define TOKUDB_CHECK_JEMALLOC 1 +#endif + #if 100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100099 // mariadb 
10.0 #define TOKU_USE_DB_TYPE_TOKUDB 1 @@ -108,6 +112,7 @@ PATENT RIGHTS GRANT: #define TOKU_INCLUDE_EXTENDED_KEYS 1 #endif #define TOKU_INCLUDE_OPTION_STRUCTS 1 +#define TOKU_OPTIMIZE_WITH_RECREATE 1 #elif 50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799 // mysql 5.7 with no patches @@ -130,17 +135,18 @@ PATENT RIGHTS GRANT: #define TOKU_PARTITION_WRITE_FRM_DATA 0 #else // mysql 5.6 with tokutek patches -#define TOKU_USE_DB_TYPE_TOKUDB 1 /* has DB_TYPE_TOKUDB patch */ +#define TOKU_USE_DB_TYPE_TOKUDB 1 // has DB_TYPE_TOKUDB patch #define TOKU_INCLUDE_ALTER_56 1 -#define TOKU_INCLUDE_ROW_TYPE_COMPRESSION 1 /* has tokudb row format compression patch */ -#define TOKU_INCLUDE_XA 1 /* has patch that fixes TC_LOG_MMAP code */ +#define TOKU_INCLUDE_ROW_TYPE_COMPRESSION 1 // has tokudb row format compression patch +#define TOKU_INCLUDE_XA 1 // has patch that fixes TC_LOG_MMAP code #define TOKU_PARTITION_WRITE_FRM_DATA 0 #define TOKU_INCLUDE_WRITE_FRM_DATA 0 -#define TOKU_INCLUDE_UPSERT 1 /* has tokudb upsert patch */ +#define TOKU_INCLUDE_UPSERT 1 // has tokudb upsert patch #if defined(HTON_SUPPORTS_EXTENDED_KEYS) #define TOKU_INCLUDE_EXTENDED_KEYS 1 #endif #endif +#define TOKU_OPTIMIZE_WITH_RECREATE 1 #elif 50500 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50599 #define TOKU_USE_DB_TYPE_TOKUDB 1 diff --git a/storage/tokudb/hatoku_hton.cc b/storage/tokudb/hatoku_hton.cc index 2291f2345bb..9e9d90f8c9c 100644 --- a/storage/tokudb/hatoku_hton.cc +++ b/storage/tokudb/hatoku_hton.cc @@ -579,9 +579,6 @@ static int tokudb_done_func(void *p) { toku_global_status_rows = NULL; my_hash_free(&tokudb_open_tables); tokudb_pthread_mutex_destroy(&tokudb_mutex); -#if defined(_WIN64) - toku_ydb_destroy(); -#endif TOKUDB_DBUG_RETURN(0); } @@ -627,8 +624,7 @@ int tokudb_end(handlerton * hton, ha_panic_function type) { static int tokudb_close_connection(handlerton * hton, THD * thd) { int error = 0; - tokudb_trx_data* trx = NULL; - trx = (tokudb_trx_data *) thd_data_get(thd, 
tokudb_hton->slot); + tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); if (trx && trx->checkpoint_lock_taken) { error = db_env->checkpointing_resume(db_env); } @@ -692,25 +688,27 @@ static void txn_progress_func(TOKU_TXN_PROGRESS progress, void* extra) { } static void commit_txn_with_progress(DB_TXN* txn, uint32_t flags, THD* thd) { - int r; + const char *orig_proc_info = tokudb_thd_get_proc_info(thd); struct txn_progress_info info; info.thd = thd; - r = txn->commit_with_progress(txn, flags, txn_progress_func, &info); + int r = txn->commit_with_progress(txn, flags, txn_progress_func, &info); if (r != 0) { sql_print_error("tried committing transaction %p and got error code %d", txn, r); } assert(r == 0); + thd_proc_info(thd, orig_proc_info); } static void abort_txn_with_progress(DB_TXN* txn, THD* thd) { - int r; + const char *orig_proc_info = tokudb_thd_get_proc_info(thd); struct txn_progress_info info; info.thd = thd; - r = txn->abort_with_progress(txn, txn_progress_func, &info); + int r = txn->abort_with_progress(txn, txn_progress_func, &info); if (r != 0) { sql_print_error("tried aborting transaction %p and got error code %d", txn, r); } assert(r == 0); + thd_proc_info(thd, orig_proc_info); } static void tokudb_cleanup_handlers(tokudb_trx_data *trx, DB_TXN *txn) { @@ -726,7 +724,7 @@ static int tokudb_commit(handlerton * hton, THD * thd, bool all) { TOKUDB_DBUG_ENTER(""); DBUG_PRINT("trans", ("ending transaction %s", all ? "all" : "stmt")); uint32_t syncflag = THDVAR(thd, commit_sync) ? 0 : DB_TXN_NOSYNC; - tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(thd, hton->slot); + tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, hton); DB_TXN **txn = all ? 
&trx->all : &trx->stmt; DB_TXN *this_txn = *txn; if (this_txn) { @@ -755,7 +753,7 @@ static int tokudb_commit(handlerton * hton, THD * thd, bool all) { static int tokudb_rollback(handlerton * hton, THD * thd, bool all) { TOKUDB_DBUG_ENTER(""); DBUG_PRINT("trans", ("aborting transaction %s", all ? "all" : "stmt")); - tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(thd, hton->slot); + tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, hton); DB_TXN **txn = all ? &trx->all : &trx->stmt; DB_TXN *this_txn = *txn; if (this_txn) { @@ -785,7 +783,7 @@ static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all) { TOKUDB_DBUG_ENTER(""); int r = 0; DBUG_PRINT("trans", ("preparing transaction %s", all ? "all" : "stmt")); - tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(thd, hton->slot); + tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, hton); DB_TXN* txn = all ? trx->all : trx->stmt; if (txn) { if (tokudb_debug & TOKUDB_DEBUG_TXN) { @@ -864,7 +862,7 @@ static int tokudb_savepoint(handlerton * hton, THD * thd, void *savepoint) { TOKUDB_DBUG_ENTER(""); int error; SP_INFO save_info = (SP_INFO)savepoint; - tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(thd, hton->slot); + tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, hton); if (thd->in_sub_stmt) { assert(trx->stmt); error = txn_begin(db_env, trx->sub_sp_level, &(save_info->txn), DB_INHERIT_ISOLATION, thd); @@ -895,7 +893,7 @@ static int tokudb_rollback_to_savepoint(handlerton * hton, THD * thd, void *save DB_TXN* parent = NULL; DB_TXN* txn_to_rollback = save_info->txn; - tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(thd, hton->slot); + tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, hton); parent = txn_to_rollback->parent; if (!(error = txn_to_rollback->abort(txn_to_rollback))) { if (save_info->in_sub_stmt) { @@ -917,7 +915,7 @@ static int tokudb_release_savepoint(handlerton * hton, THD * thd, void *savepoin DB_TXN* parent = 
NULL; DB_TXN* txn_to_commit = save_info->txn; - tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(thd, hton->slot); + tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, hton); parent = txn_to_commit->parent; if (!(error = txn_to_commit->commit(txn_to_commit, 0))) { if (save_info->in_sub_stmt) { @@ -974,10 +972,10 @@ static int tokudb_discover3(handlerton *hton, THD* thd, const char *db, const ch HA_METADATA_KEY curr_key = hatoku_frm_data; DBT key = {}; DBT value = {}; - bool do_commit; + bool do_commit = false; #if 100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100099 - tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot); + tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); if (thd_sql_command(thd) == SQLCOM_CREATE_TABLE && trx && trx->sub_sp_level) { do_commit = false; txn = trx->sub_sp_level; @@ -1132,15 +1130,14 @@ static bool tokudb_show_engine_status(THD * thd, stat_print_fn * stat_print) { static void tokudb_checkpoint_lock(THD * thd) { int error; const char *old_proc_info; - tokudb_trx_data* trx = NULL; - trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot); + tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); if (!trx) { error = create_tokudb_trx_data_instance(&trx); // // can only fail due to memory allocation, so ok to assert // assert(!error); - thd_data_set(thd, tokudb_hton->slot, trx); + thd_set_ha_data(thd, tokudb_hton, trx); } if (trx->checkpoint_lock_taken) { @@ -1164,8 +1161,7 @@ cleanup: static void tokudb_checkpoint_unlock(THD * thd) { int error; const char *old_proc_info; - tokudb_trx_data* trx = NULL; - trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot); + tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); if (!trx) { error = 0; goto cleanup; diff --git a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_released.result 
b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_released.result index f84be01163f..10431bb812a 100644 --- a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_released.result +++ b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_released.result @@ -9,6 +9,7 @@ locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right select * from information_schema.tokudb_lock_waits; requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time set autocommit=0; +set tokudb_prelock_empty=OFF; insert into t values (1); set autocommit=0; insert into t values (1); @@ -26,7 +27,6 @@ commit; select * from information_schema.tokudb_locks; locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 -TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 select * from information_schema.tokudb_lock_waits; requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time ERROR 23000: Duplicate entry '1' for key 'PRIMARY' @@ -38,6 +38,7 @@ locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right select * from information_schema.tokudb_lock_waits; requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time set autocommit=0; +set tokudb_prelock_empty=OFF; replace into t values (1); set autocommit=0; replace into t values (1); diff --git a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_timeout.result b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_timeout.result index 1e0668164ff..10e3830506d 100644 --- a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_timeout.result +++ b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_timeout.result @@ -9,6 +9,7 @@ locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right select * from 
information_schema.tokudb_lock_waits; requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time set autocommit=0; +set tokudb_prelock_empty=OFF; insert into t values (1); set autocommit=0; insert into t values (1); diff --git a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks.result b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks.result index ad252da448f..9fce0695983 100644 --- a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks.result +++ b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks.result @@ -12,7 +12,7 @@ set autocommit=0; insert into t values (2); insert into t values (4); insert into t values (6); -select * from information_schema.tokudb_locks order by locks_trx_id; +select * from information_schema.tokudb_locks order by locks_trx_id,locks_key_left; locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 TRX_ID MYSQL_ID ./test/t-main 0003000000 0003000000 diff --git a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks_released.result b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks_released.result index 21a6b5d308c..c135f3858b4 100644 --- a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks_released.result +++ b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks_released.result @@ -6,6 +6,7 @@ set autocommit=0; select * from information_schema.tokudb_locks; locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right set autocommit=0; +set tokudb_prelock_empty=OFF; insert into t values (1); set autocommit=0; insert into t values (1); @@ -16,7 +17,6 @@ commit; select * from information_schema.tokudb_locks; locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 -TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 ERROR 23000: Duplicate entry '1' for key 'PRIMARY' commit; select * from information_schema.tokudb_locks; 
diff --git a/storage/tokudb/mysql-test/tokudb/r/mvcc-26.result b/storage/tokudb/mysql-test/tokudb/r/mvcc-26.result index d861a972388..ba5e6ab69f8 100644 --- a/storage/tokudb/mysql-test/tokudb/r/mvcc-26.result +++ b/storage/tokudb/mysql-test/tokudb/r/mvcc-26.result @@ -16,6 +16,7 @@ select * from foo; a optimize table foo; Table Op Msg_type Msg_text +test.foo optimize note Table does not support optimize, doing recreate + analyze instead test.foo optimize status OK select * from foo; a @@ -27,6 +28,7 @@ a 3 optimize table foo; Table Op Msg_type Msg_text +test.foo optimize note Table does not support optimize, doing recreate + analyze instead test.foo optimize status OK select * from foo; a diff --git a/storage/tokudb/mysql-test/tokudb/r/type_datetime.result b/storage/tokudb/mysql-test/tokudb/r/type_datetime.result index e6701b0a0b5..82f5ebe9600 100644 --- a/storage/tokudb/mysql-test/tokudb/r/type_datetime.result +++ b/storage/tokudb/mysql-test/tokudb/r/type_datetime.result @@ -21,6 +21,7 @@ t delete from t1 where t > 0; optimize table t1; Table Op Msg_type Msg_text +test.t1 optimize note Table does not support optimize, doing recreate + analyze instead test.t1 optimize status OK check table t1; Table Op Msg_type Msg_text diff --git a/storage/tokudb/mysql-test/tokudb/r/type_decimal.result b/storage/tokudb/mysql-test/tokudb/r/type_decimal.result index 647065bdedc..a432927eda2 100644 --- a/storage/tokudb/mysql-test/tokudb/r/type_decimal.result +++ b/storage/tokudb/mysql-test/tokudb/r/type_decimal.result @@ -799,10 +799,10 @@ ROUND(qty,3) dps ROUND(qty,dps) DROP TABLE t1; SELECT 1 % .123456789123456789123456789123456789123456789123456789123456789123456789123456789 AS '%'; % -0.012345687012345687012345687012345687012345687012345687012345687012345687000000000 +0.012345687012345687012345687012 SELECT MOD(1, .123456789123456789123456789123456789123456789123456789123456789123456789123456789) AS 'MOD()'; MOD() 
-0.012345687012345687012345687012345687012345687012345687012345687012345687000000000 +0.012345687012345687012345687012 create table t1 (f1 decimal(6,6),f2 decimal(6,6) zerofill); insert into t1 values (-0.123456,0.123456); select group_concat(f1),group_concat(f2) from t1; diff --git a/storage/tokudb/mysql-test/tokudb/r/type_newdecimal.result b/storage/tokudb/mysql-test/tokudb/r/type_newdecimal.result index 14ff0fbd1ca..3f76f54609f 100644 --- a/storage/tokudb/mysql-test/tokudb/r/type_newdecimal.result +++ b/storage/tokudb/mysql-test/tokudb/r/type_newdecimal.result @@ -704,7 +704,7 @@ select .7777777777777777777777777777777777777 * 777777777777777777.777777777777777777700000000000 select .7777777777777777777777777777777777777 - 0.1; .7777777777777777777777777777777777777 - 0.1 -0.6777777777777777777777777777777777777 +0.677777777777777777777777777778 select .343434343434343434 + .343434343434343434; .343434343434343434 + .343434343434343434 0.686868686868686868 @@ -1841,7 +1841,7 @@ Warnings: Note 1265 Data truncated for column 'c1' at row 4 DESC t2; Field Type Null Key Default Extra -c1 decimal(32,30) YES NULL +c1 decimal(33,30) YES NULL DROP TABLE t1,t2; CREATE TABLE t1 (a DECIMAL(30,30)); INSERT INTO t1 VALUES (0.1),(0.2),(0.3); @@ -1852,7 +1852,7 @@ Note 1265 Data truncated for column 'c1' at row 2 Note 1265 Data truncated for column 'c1' at row 3 DESC t2; Field Type Null Key Default Extra -c1 decimal(34,0) YES NULL +c1 decimal(33,30) YES NULL DROP TABLE t1,t2; CREATE TABLE t1 (a DECIMAL(30,30)); INSERT INTO t1 VALUES (0.1),(0.2),(0.3); diff --git a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_released.test b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_released.test index 6b4e5d88673..3b56660ff83 100644 --- a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_released.test +++ b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_released.test @@ -23,6 +23,7 @@ select * from information_schema.tokudb_lock_waits; connect 
(conn_a,localhost,root,,); set autocommit=0; +set tokudb_prelock_empty=OFF; # disable the bulk loader insert into t values (1); connect (conn_b,localhost,root,,); @@ -72,6 +73,7 @@ select * from information_schema.tokudb_lock_waits; connect (conn_a,localhost,root,,); set autocommit=0; +set tokudb_prelock_empty=OFF; # disable the bulk loader replace into t values (1); connect (conn_b,localhost,root,,); diff --git a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test index ea7eb9a2c89..d7925733a0f 100644 --- a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test +++ b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test @@ -16,6 +16,7 @@ select * from information_schema.tokudb_lock_waits; connect (conn_a,localhost,root,,); set autocommit=0; +set tokudb_prelock_empty=OFF; insert into t values (1); connect (conn_b,localhost,root,,); diff --git a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test index a3745b5471b..e5a67559b1a 100644 --- a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test +++ b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test @@ -29,7 +29,7 @@ insert into t values (6); # should find 3 locks for 2 transactions connection default; replace_column 1 TRX_ID 2 MYSQL_ID; -eval select * from information_schema.tokudb_locks order by locks_trx_id; +eval select * from information_schema.tokudb_locks order by locks_trx_id,locks_key_left; connection conn_a; commit; diff --git a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks_released.test b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks_released.test index 3a1cf2023da..f2ca9b8ed9c 100644 --- a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks_released.test +++ b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks_released.test @@ -18,20 +18,17 @@ let $default_id=`select connection_id()`; # should be empty 
select * from information_schema.tokudb_locks; - connect (conn_a,localhost,root,,); set autocommit=0; -let $a_id=`select connection_id()`; +set tokudb_prelock_empty=OFF; # disable bulk loader insert into t values (1); connect (conn_b,localhost,root,,); set autocommit=0; -let $b_id=`select connection_id()`; send insert into t values (1); - # should find the presence of a lock on 2nd transaction connection default; let $wait_condition= select count(*)=1 from information_schema.processlist where info='insert into t values (1)' and state='update'; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/lock_uniq_key_empty.result b/storage/tokudb/mysql-test/tokudb_bugs/r/lock_uniq_key_empty.result index 6966aa24ff8..325aef46afe 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/r/lock_uniq_key_empty.result +++ b/storage/tokudb/mysql-test/tokudb_bugs/r/lock_uniq_key_empty.result @@ -1,6 +1,7 @@ set default_storage_engine=tokudb; drop table if exists t; create table t (id int, unique key(id)); +set tokudb_prelock_empty=OFF; begin; insert into t values (1); begin; @@ -13,6 +14,7 @@ id 2 drop table if exists t; create table t (id int not null, unique key(id)); +set tokudb_prelock_empty=OFF; begin; insert into t values (1); begin; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/optimize_temp_table_tokudb.result b/storage/tokudb/mysql-test/tokudb_bugs/r/optimize_temp_table_tokudb.result index fe6dba5214e..f3ac15b220d 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/r/optimize_temp_table_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_bugs/r/optimize_temp_table_tokudb.result @@ -2,5 +2,6 @@ drop table if exists t; create temporary table t (x int) engine=tokudb; optimize table t; Table Op Msg_type Msg_text +test.t optimize note Table does not support optimize, doing recreate + analyze instead test.t optimize status OK drop table t; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/lock_uniq_key_empty.test 
b/storage/tokudb/mysql-test/tokudb_bugs/t/lock_uniq_key_empty.test index 3f8d7113dff..0a001c2736d 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/lock_uniq_key_empty.test +++ b/storage/tokudb/mysql-test/tokudb_bugs/t/lock_uniq_key_empty.test @@ -7,6 +7,7 @@ enable_warnings; create table t (id int, unique key(id)); connect(c1,localhost,root,,); +set tokudb_prelock_empty=OFF; # disable the tokudb bulk loader begin; insert into t values (1); connect(c2,localhost,root,,); @@ -24,6 +25,7 @@ drop table if exists t; create table t (id int not null, unique key(id)); connect(c1,localhost,root,,); +set tokudb_prelock_empty=OFF; # disable the tokudb bulk loader begin; insert into t values (1); connect(c2,localhost,root,,); diff --git a/storage/tokudb/mysql-test/tokudb_mariadb/r/optimize.result b/storage/tokudb/mysql-test/tokudb_mariadb/r/optimize.result index 5151f2bc895..c32a7d61129 100644 --- a/storage/tokudb/mysql-test/tokudb_mariadb/r/optimize.result +++ b/storage/tokudb/mysql-test/tokudb_mariadb/r/optimize.result @@ -9,6 +9,7 @@ a 3 set debug_sync='now SIGNAL go'; Table Op Msg_type Msg_text +test.t1 optimize note Table does not support optimize, doing recreate + analyze instead test.t1 optimize status OK drop table t1; set debug_sync='reset'; diff --git a/storage/tokudb/scripts/setup.mysql.bash b/storage/tokudb/scripts/setup.mysql.bash index 6ae604e34c1..85132350289 100755 --- a/storage/tokudb/scripts/setup.mysql.bash +++ b/storage/tokudb/scripts/setup.mysql.bash @@ -5,6 +5,39 @@ function usage() { echo "--mysqlbuild=$mysqlbuild --shutdown=$shutdown --install=$install --startup=$startup" } +function download_file() { + local file=$1 + s3get $s3bucket $file $file +} + +function download_tarball() { + local tarball=$1 + if [ ! -f $tarball ] ; then + download_file $tarball + if [ $? -ne 0 ] ; then test 0 = 1; return; fi + fi + if [ ! -f $tarball.md5 ] ; then + download_file $tarball.md5 + if [ $? 
-ne 0 ] ; then test 0 = 1; return; fi + fi +} + +function install_tarball() { + local basedir=$1; local tarball=$2 + tar -x -z -f $basedir/$tarball + if [ $? -ne 0 ] ; then test 0 = 1; return; fi +} + +function check_md5() { + local tarball=$1 + md5sum --check $tarball.md5 + if [ $? -ne 0 ] ; then + # support jacksum md5 output which is almost the same as md5sum + diff -b <(cat $tarball.md5) <(md5sum $tarball) + if [ $? -ne 0 ] ; then test 0 = 1; return; fi + fi +} + mysqlbuild= shutdown=1 install=1 @@ -64,30 +97,24 @@ basedir=$PWD mysqltarball=$mysqlbuild.tar.gz -if [ -f $mysqlbuild.tar.gz ] ; then - compression=-z - mysqltarball=$mysqlbuild.tar.gz -elif [ -f $mysqlbuild.tar.bz2 ] ; then - compression=-j - mysqltarball=$mysqlbuild.tar.bz2 -fi - -# get the release -if [ ! -f $mysqltarball ] ; then - s3get $s3bucket $mysqltarball $mysqltarball - if [ $? -ne 0 ] ; then exit 1; fi -fi -if [ ! -f $mysqltarball.md5 ] ; then - s3get $s3bucket $mysqltarball.md5 $mysqltarball.md5 - if [ $? -ne 0 ] ; then exit 1; fi -fi +# get the tarball +download_tarball $mysqltarball +if [ $? -ne 0 ] ; then exit 1; fi # check the md5 sum -md5sum --check $mysqltarball.md5 -if [ $? -ne 0 ] ; then - # support jacksum md5 output which is almost the same as md5sum - diff -b <(cat $mysqltarball.md5) <(md5sum $mysqltarball) - if [ $? -ne 0 ] ; then exit 1; fi +check_md5 $mysqltarball +if [ $? -ne 0 ] ; then exit 1; fi + +tokudbtarball="" +if [[ $mysqltarball =~ ^(Percona-Server.*)\.(Linux\.x86_64.*)$ ]] ; then + tar tzf $mysqltarball | egrep ha_tokudb.so >/dev/null 2>&1 + if [ $? -ne 0 ] ; then + tokudbtarball=${BASH_REMATCH[1]}.TokuDB.${BASH_REMATCH[2]} + download_tarball $tokudbtarball + if [ $? -ne 0 ] ; then exit 1; fi + check_md5 $tokudbtarball + if [ $? -ne 0 ] ; then exit 1; fi + fi fi # set ldpath @@ -126,8 +153,14 @@ if [ ! 
-d $mysqlbuild ] || [ $install -ne 0 ] ; then rm mysql if [ -d $mysqlbuild ] ; then $sudo rm -rf $mysqlbuild; fi - tar -x $compression -f $basedir/$mysqltarball + install_tarball $basedir $mysqltarball if [ $? -ne 0 ] ; then exit 1; fi + + if [ $tokudbtarball ] ; then + install_tarball $basedir $tokudbtarball + if [ $? -ne 0 ] ; then exit 1; fi + fi + ln -s $mysqldir /usr/local/mysql if [ $? -ne 0 ] ; then exit 1; fi ln -s $mysqldir /usr/local/$mysqlbuild @@ -180,6 +213,10 @@ if [ $startup -ne 0 ] ; then else default_arg="--defaults-file=$defaultsfile" fi + j=/usr/local/mysql/lib/mysql/libjemalloc.so + if [ -f $j ] ; then + default_arg="$default_arg --malloc-lib=$j" + fi $sudo -b bash -c "$ldpath /usr/local/mysql/bin/mysqld_safe $default_arg $mysqld_args" >/dev/null 2>&1 & fi sleep $sleeptime diff --git a/storage/xtradb/btr/btr0cur.cc b/storage/xtradb/btr/btr0cur.cc index 5e0473daa85..a074414aa7a 100644 --- a/storage/xtradb/btr/btr0cur.cc +++ b/storage/xtradb/btr/btr0cur.cc @@ -202,15 +202,6 @@ btr_rec_free_externally_stored_fields( mtr_t* mtr); /*!< in: mini-transaction handle which contains an X-latch to record page and to the index tree */ -/***********************************************************//** -Gets the externally stored size of a record, in units of a database page. -@return externally stored part, in units of a database page */ -static -ulint -btr_rec_get_externally_stored_len( -/*==============================*/ - const rec_t* rec, /*!< in: record */ - const ulint* offsets);/*!< in: array returned by rec_get_offsets() */ #endif /* !UNIV_HOTBACKUP */ /******************************************************//** @@ -4238,15 +4229,15 @@ btr_rec_get_field_ref_offs( #define btr_rec_get_field_ref(rec, offsets, n) \ ((rec) + btr_rec_get_field_ref_offs(offsets, n)) -/***********************************************************//** -Gets the externally stored size of a record, in units of a database page. 
+/** Gets the externally stored size of a record, in units of a database page. +@param[in] rec record +@param[in] offsets array returned by rec_get_offsets() @return externally stored part, in units of a database page */ -static + ulint btr_rec_get_externally_stored_len( -/*==============================*/ - const rec_t* rec, /*!< in: record */ - const ulint* offsets)/*!< in: array returned by rec_get_offsets() */ + const rec_t* rec, + const ulint* offsets) { ulint n_fields; ulint total_extern_len = 0; diff --git a/storage/xtradb/btr/btr0sea.cc b/storage/xtradb/btr/btr0sea.cc index eec3c0b29aa..ac5e9aec67b 100644 --- a/storage/xtradb/btr/btr0sea.cc +++ b/storage/xtradb/btr/btr0sea.cc @@ -1944,7 +1944,10 @@ btr_search_validate_one_table( buf_pool_t* buf_pool; index_id_t page_index_id; - buf_pool = buf_pool_from_bpage((buf_page_t*) block); + buf_pool = buf_pool_from_bpage((buf_page_t *) block); + /* Prevent BUF_BLOCK_FILE_PAGE -> BUF_BLOCK_REMOVE_HASH + transition until we lock the block mutex */ + mutex_enter(&buf_pool->LRU_list_mutex); if (UNIV_LIKELY(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE)) { @@ -1980,6 +1983,7 @@ btr_search_validate_one_table( } mutex_enter(&block->mutex); + mutex_exit(&buf_pool->LRU_list_mutex); ut_a(!dict_index_is_ibuf(block->index)); diff --git a/storage/xtradb/buf/buf0flu.cc b/storage/xtradb/buf/buf0flu.cc index df9187109c7..d1656a4dd48 100644 --- a/storage/xtradb/buf/buf0flu.cc +++ b/storage/xtradb/buf/buf0flu.cc @@ -546,7 +546,7 @@ buf_flush_ready_for_flush( ut_ad(flush_type < BUF_FLUSH_N_TYPES); ut_ad(mutex_own(buf_page_get_mutex(bpage)) || flush_type == BUF_FLUSH_LIST); - ut_a(buf_page_in_file(bpage)); + ut_a(buf_page_in_file(bpage) || buf_page_get_state(bpage) == BUF_BLOCK_REMOVE_HASH); if (bpage->oldest_modification == 0 || buf_page_get_io_fix_unlocked(bpage) != BUF_IO_NONE) { @@ -557,6 +557,7 @@ buf_flush_ready_for_flush( switch (flush_type) { case BUF_FLUSH_LIST: + return(buf_page_get_state(bpage) != BUF_BLOCK_REMOVE_HASH); 
case BUF_FLUSH_LRU: case BUF_FLUSH_SINGLE_PAGE: return(true); @@ -1363,7 +1364,8 @@ buf_flush_page_and_try_neighbors( } ut_a(buf_page_in_file(bpage) - || buf_page_get_state(bpage) == BUF_BLOCK_REMOVE_HASH); + || (buf_page_get_state(bpage) == BUF_BLOCK_REMOVE_HASH + )); if (buf_flush_ready_for_flush(bpage, flush_type)) { buf_pool_t* buf_pool; @@ -1649,7 +1651,7 @@ buf_do_LRU_batch( { if (buf_LRU_evict_from_unzip_LRU(buf_pool)) { n->unzip_LRU_evicted - += buf_free_from_unzip_LRU_list_batch(buf_pool, max); + = buf_free_from_unzip_LRU_list_batch(buf_pool, max); } else { n->unzip_LRU_evicted = 0; } @@ -1948,6 +1950,7 @@ buf_flush_LRU( if (!buf_flush_start(buf_pool, BUF_FLUSH_LRU)) { n->flushed = 0; n->evicted = 0; + n->unzip_LRU_evicted = 0; return(false); } @@ -2365,6 +2368,10 @@ af_get_pct_for_dirty() { ulint dirty_pct = buf_get_modified_ratio_pct(); + if (dirty_pct > 0 && srv_max_buf_pool_modified_pct == 0) { + return(100); + } + ut_a(srv_max_dirty_pages_pct_lwm <= srv_max_buf_pool_modified_pct); diff --git a/storage/xtradb/buf/buf0lru.cc b/storage/xtradb/buf/buf0lru.cc index d0904f4b8ad..be6d38f5ef7 100644 --- a/storage/xtradb/buf/buf0lru.cc +++ b/storage/xtradb/buf/buf0lru.cc @@ -595,6 +595,8 @@ buf_flush_or_remove_pages( buf_page_t* bpage; ulint processed = 0; + ut_ad(mutex_own(&buf_pool->LRU_list_mutex)); + buf_flush_list_mutex_enter(buf_pool); rescan: @@ -2429,6 +2431,25 @@ buf_LRU_block_remove_hashed( " in the hash table\n", (ulong) bpage->space, (ulong) bpage->offset); + +#ifdef UNIV_DEBUG + fprintf(stderr, + "InnoDB: in_page_hash %lu in_zip_hash %lu\n" + " in_free_list %lu in_flush_list %lu in_LRU_list %lu\n" + " zip.data %p zip_size %lu page_state %d\n", + bpage->in_page_hash, bpage->in_zip_hash, + bpage->in_free_list, bpage->in_flush_list, + bpage->in_LRU_list, bpage->zip.data, + buf_page_get_zip_size(bpage), + buf_page_get_state(bpage)); +#else + fprintf(stderr, + "InnoDB: zip.data %p zip_size %lu page_state %d\n", + bpage->zip.data, + 
buf_page_get_zip_size(bpage), + buf_page_get_state(bpage)); +#endif + if (hashed_bpage) { fprintf(stderr, "InnoDB: In hash table we find block" @@ -2439,6 +2460,9 @@ buf_LRU_block_remove_hashed( (const void*) bpage); } + ut_a(buf_page_get_io_fix(bpage) == BUF_IO_NONE); + ut_a(bpage->buf_fix_count == 0); + #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG mutex_exit(buf_page_get_mutex(bpage)); rw_lock_x_unlock(hash_lock); diff --git a/storage/xtradb/dict/dict0dict.cc b/storage/xtradb/dict/dict0dict.cc index 08ebd386f8b..83604b3057a 100644 --- a/storage/xtradb/dict/dict0dict.cc +++ b/storage/xtradb/dict/dict0dict.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. This program is free software; you can redistribute it and/or modify it under @@ -50,6 +50,7 @@ UNIV_INTERN dict_index_t* dict_ind_compact; #include "btr0btr.h" #include "btr0cur.h" #include "btr0sea.h" +#include "os0once.h" #include "page0zip.h" #include "page0page.h" #include "pars0pars.h" @@ -102,7 +103,7 @@ UNIV_INTERN ulong zip_pad_max = 50; UNIV_INTERN mysql_pfs_key_t dict_operation_lock_key; UNIV_INTERN mysql_pfs_key_t index_tree_rw_lock_key; UNIV_INTERN mysql_pfs_key_t index_online_log_key; -UNIV_INTERN mysql_pfs_key_t dict_table_stats_latch_key; +UNIV_INTERN mysql_pfs_key_t dict_table_stats_key; #endif /* UNIV_PFS_RWLOCK */ #ifdef UNIV_PFS_MUTEX @@ -121,6 +122,11 @@ UNIV_INTERN mysql_pfs_key_t dict_foreign_err_mutex_key; /** Identifies generated InnoDB foreign key names */ static char dict_ibfk[] = "_ibfk_"; +bool innodb_table_stats_not_found = false; +bool innodb_index_stats_not_found = false; +static bool innodb_table_stats_not_found_reported = false; +static bool innodb_index_stats_not_found_reported = false; + 
/*******************************************************************//** Tries to find column names for the index and sets the col field of the index. @@ -319,6 +325,82 @@ dict_mutex_exit_for_mysql(void) mutex_exit(&(dict_sys->mutex)); } +/** Allocate and init a dict_table_t's stats latch. +This function must not be called concurrently on the same table object. +@param[in,out] table_void table whose stats latch to create */ +static +void +dict_table_stats_latch_alloc( + void* table_void) +{ + dict_table_t* table = static_cast<dict_table_t*>(table_void); + + table->stats_latch = new(std::nothrow) rw_lock_t; + + ut_a(table->stats_latch != NULL); + + rw_lock_create(dict_table_stats_key, table->stats_latch, + SYNC_INDEX_TREE); +} + +/** Deinit and free a dict_table_t's stats latch. +This function must not be called concurrently on the same table object. +@param[in,out] table table whose stats latch to free */ +static +void +dict_table_stats_latch_free( + dict_table_t* table) +{ + rw_lock_free(table->stats_latch); + delete table->stats_latch; +} + +/** Create a dict_table_t's stats latch or delay for lazy creation. +This function is only called from either single threaded environment +or from a thread that has not shared the table object with other threads. +@param[in,out] table table whose stats latch to create +@param[in] enabled if false then the latch is disabled +and dict_table_stats_lock()/unlock() become noop on this table. */ + +void +dict_table_stats_latch_create( + dict_table_t* table, + bool enabled) +{ + if (!enabled) { + table->stats_latch = NULL; + table->stats_latch_created = os_once::DONE; + return; + } + +#ifdef HAVE_ATOMIC_BUILTINS + /* We create this lazily the first time it is used. 
*/ + table->stats_latch = NULL; + table->stats_latch_created = os_once::NEVER_DONE; +#else /* HAVE_ATOMIC_BUILTINS */ + + dict_table_stats_latch_alloc(table); + + table->stats_latch_created = os_once::DONE; +#endif /* HAVE_ATOMIC_BUILTINS */ +} + +/** Destroy a dict_table_t's stats latch. +This function is only called from either single threaded environment +or from a thread that has not shared the table object with other threads. +@param[in,out] table table whose stats latch to destroy */ + +void +dict_table_stats_latch_destroy( + dict_table_t* table) +{ + if (table->stats_latch_created == os_once::DONE + && table->stats_latch != NULL) { + + dict_table_stats_latch_free(table); + } +} + /**********************************************************************//** Lock the appropriate latch to protect a given table's statistics. */ UNIV_INTERN @@ -331,6 +413,14 @@ dict_table_stats_lock( ut_ad(table != NULL); ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); +#ifdef HAVE_ATOMIC_BUILTINS + os_once::do_or_wait_for_done( + &table->stats_latch_created, + dict_table_stats_latch_alloc, table); +#else /* HAVE_ATOMIC_BUILTINS */ + ut_ad(table->stats_latch_created == os_once::DONE); +#endif /* HAVE_ATOMIC_BUILTINS */ + if (table->stats_latch == NULL) { /* This is a dummy table object that is private in the current thread and is not shared between multiple threads, thus we @@ -5216,8 +5306,6 @@ dict_table_print( index = UT_LIST_GET_NEXT(indexes, index); } - table->stat_initialized = FALSE; - dict_table_stats_unlock(table, RW_X_LATCH); foreign = UT_LIST_GET_FIRST(table->foreign_list); @@ -6059,14 +6147,34 @@ dict_table_schema_check( table = dict_table_get_low(req_schema->table_name); if (table == NULL) { + bool should_print=true; /* no such table */ - ut_snprintf(errstr, errstr_sz, - "Table %s not found.", - ut_format_name(req_schema->table_name, - TRUE, buf, sizeof(buf))); + if (innobase_strcasecmp(req_schema->table_name, "mysql/innodb_table_stats") == 0) { + if 
(innodb_table_stats_not_found_reported == false) { + innodb_table_stats_not_found = true; + innodb_table_stats_not_found_reported = true; + } else { + should_print = false; + } + } else if (innobase_strcasecmp(req_schema->table_name, "mysql/innodb_index_stats") == 0 ) { + if (innodb_index_stats_not_found_reported == false) { + innodb_index_stats_not_found = true; + innodb_index_stats_not_found_reported = true; + } else { + should_print = false; + } + } - return(DB_TABLE_NOT_FOUND); + if (should_print) { + ut_snprintf(errstr, errstr_sz, + "Table %s not found.", + ut_format_name(req_schema->table_name, + TRUE, buf, sizeof(buf))); + return(DB_TABLE_NOT_FOUND); + } else { + return(DB_STATS_DO_NOT_EXIST); + } } if (table->ibd_file_missing) { diff --git a/storage/xtradb/dict/dict0mem.cc b/storage/xtradb/dict/dict0mem.cc index 7ce42fa8efc..b80f9fc5750 100644 --- a/storage/xtradb/dict/dict0mem.cc +++ b/storage/xtradb/dict/dict0mem.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. This program is free software; you can redistribute it and/or modify it under @@ -98,13 +98,9 @@ dict_mem_table_create( ut_d(table->magic_n = DICT_TABLE_MAGIC_N); - if (!nonshared) { - table->stats_latch = new rw_lock_t; - rw_lock_create(dict_table_stats_latch_key, table->stats_latch, - SYNC_INDEX_TREE); - } else { - table->stats_latch = NULL; - } + /* true means that the stats latch will be enabled - + dict_table_stats_lock() will not be noop. 
*/ + dict_table_stats_latch_create(table, true); #ifndef UNIV_HOTBACKUP @@ -168,17 +164,13 @@ dict_mem_table_free( } } #ifndef UNIV_HOTBACKUP - if (table->stats_latch) { + if (table->autoinc_lock) { mutex_free(&(table->autoinc_mutex)); } #endif /* UNIV_HOTBACKUP */ - if (table->stats_latch) { - - rw_lock_free(table->stats_latch); - delete table->stats_latch; - } + dict_table_stats_latch_destroy(table); ut_free(table->name); mem_heap_free(table->heap); diff --git a/storage/xtradb/dict/dict0stats.cc b/storage/xtradb/dict/dict0stats.cc index 928bdb3f2ef..1eac9e0df51 100644 --- a/storage/xtradb/dict/dict0stats.cc +++ b/storage/xtradb/dict/dict0stats.cc @@ -46,6 +46,7 @@ Created Jan 06, 2010 Vasil Dimov #include "ut0rnd.h" /* ut_rnd_interval() */ #include "ut0ut.h" /* ut_format_name(), ut_time() */ +#include <algorithm> #include <map> #include <vector> @@ -127,10 +128,11 @@ where n=1..n_uniq. #endif /* UNIV_STATS_DEBUG */ /* Gets the number of leaf pages to sample in persistent stats estimation */ -#define N_SAMPLE_PAGES(index) \ - ((index)->table->stats_sample_pages != 0 ? \ - (index)->table->stats_sample_pages : \ - srv_stats_persistent_sample_pages) +#define N_SAMPLE_PAGES(index) \ + static_cast<ib_uint64_t>( \ + (index)->table->stats_sample_pages != 0 \ + ? 
(index)->table->stats_sample_pages \ + : srv_stats_persistent_sample_pages) /* number of distinct records on a given level that are required to stop descending to lower levels and fetch N_SAMPLE_PAGES(index) records @@ -268,10 +270,12 @@ dict_stats_persistent_storage_check( mutex_exit(&(dict_sys->mutex)); } - if (ret != DB_SUCCESS) { + if (ret != DB_SUCCESS && ret != DB_STATS_DO_NOT_EXIST) { ut_print_timestamp(stderr); fprintf(stderr, " InnoDB: Error: %s\n", errstr); return(false); + } else if (ret == DB_STATS_DO_NOT_EXIST) { + return false; } /* else */ @@ -430,9 +434,9 @@ dict_stats_table_clone_create( t->corrupted = table->corrupted; /* This private object "t" is not shared with other threads, so - we do not need the stats_latch. The lock/unlock routines will do - nothing if stats_latch is NULL. */ - t->stats_latch = NULL; + we do not need the stats_latch (thus we pass false below). The + dict_table_stats_lock()/unlock() routines will do nothing. */ + dict_table_stats_latch_create(t, false); UT_LIST_INIT(t->indexes); @@ -508,6 +512,7 @@ dict_stats_table_clone_free( /*========================*/ dict_table_t* t) /*!< in: dummy table object to free */ { + dict_table_stats_latch_destroy(t); mem_heap_free(t->heap); } @@ -1283,35 +1288,40 @@ enum page_scan_method_t { }; /* @} */ -/*********************************************************************//** -Scan a page, reading records from left to right and counting the number -of distinct records on that page (looking only at the first n_prefix -columns). If scan_method is QUIT_ON_FIRST_NON_BORING then the function +/** Scan a page, reading records from left to right and counting the number +of distinct records (looking only at the first n_prefix +columns) and the number of external pages pointed by records from this page. 
+If scan_method is QUIT_ON_FIRST_NON_BORING then the function will return as soon as it finds a record that does not match its neighbor to the right, which means that in the case of QUIT_ON_FIRST_NON_BORING the returned n_diff can either be 0 (empty page), 1 (the whole page has all keys equal) or 2 (the function found a non-boring record and returned). +@param[out] out_rec record, or NULL +@param[out] offsets1 rec_get_offsets() working space (must +be big enough) +@param[out] offsets2 rec_get_offsets() working space (must +be big enough) +@param[in] index index of the page +@param[in] page the page to scan +@param[in] n_prefix look at the first n_prefix columns +@param[in] scan_method scan to the end of the page or not +@param[out] n_diff number of distinct records encountered +@param[out] n_external_pages if this is non-NULL then it will be set +to the number of externally stored pages which were encountered @return offsets1 or offsets2 (the offsets of *out_rec), or NULL if the page is empty and does not contain user records. 
*/ -UNIV_INLINE __attribute__((nonnull)) +UNIV_INLINE ulint* dict_stats_scan_page( -/*=================*/ - const rec_t** out_rec, /*!< out: record, or NULL */ - ulint* offsets1, /*!< out: rec_get_offsets() - working space (must be big - enough) */ - ulint* offsets2, /*!< out: rec_get_offsets() - working space (must be big - enough) */ - dict_index_t* index, /*!< in: index of the page */ - const page_t* page, /*!< in: the page to scan */ - ulint n_prefix, /*!< in: look at the first - n_prefix columns */ - page_scan_method_t scan_method, /*!< in: scan to the end of - the page or not */ - ib_uint64_t* n_diff) /*!< out: number of distinct - records encountered */ + const rec_t** out_rec, + ulint* offsets1, + ulint* offsets2, + dict_index_t* index, + const page_t* page, + ulint n_prefix, + page_scan_method_t scan_method, + ib_uint64_t* n_diff, + ib_uint64_t* n_external_pages) { ulint* offsets_rec = offsets1; ulint* offsets_next_rec = offsets2; @@ -1329,6 +1339,12 @@ dict_stats_scan_page( get_next = page_rec_get_next_const; } + const bool should_count_external_pages = n_external_pages != NULL; + + if (should_count_external_pages) { + *n_external_pages = 0; + } + rec = get_next(page_get_infimum_rec(page)); if (page_rec_is_supremum(rec)) { @@ -1341,6 +1357,11 @@ dict_stats_scan_page( offsets_rec = rec_get_offsets(rec, index, offsets_rec, ULINT_UNDEFINED, &heap); + if (should_count_external_pages) { + *n_external_pages += btr_rec_get_externally_stored_len( + rec, offsets_rec); + } + next_rec = get_next(rec); *n_diff = 1; @@ -1391,6 +1412,11 @@ dict_stats_scan_page( offsets_next_rec = offsets_tmp; } + if (should_count_external_pages) { + *n_external_pages += btr_rec_get_externally_stored_len( + rec, offsets_rec); + } + next_rec = get_next(next_rec); } @@ -1401,19 +1427,25 @@ func_exit: return(offsets_rec); } -/*********************************************************************//** -Dive below the current position of a cursor and calculate the number of +/** Dive below the 
current position of a cursor and calculate the number of distinct records on the leaf page, when looking at the fist n_prefix -columns. +columns. Also calculate the number of external pages pointed by records +on the leaf page. +@param[in] cur cursor +@param[in] n_prefix look at the first n_prefix columns +when comparing records +@param[out] n_diff number of distinct records +@param[out] n_external_pages number of external pages +@param[in,out] mtr mini-transaction @return number of distinct records on the leaf page */ static -ib_uint64_t +void dict_stats_analyze_index_below_cur( -/*===============================*/ - const btr_cur_t*cur, /*!< in: cursor */ - ulint n_prefix, /*!< in: look at the first n_prefix - columns when comparing records */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + const btr_cur_t* cur, + ulint n_prefix, + ib_uint64_t* n_diff, + ib_uint64_t* n_external_pages, + mtr_t* mtr) { dict_index_t* index; ulint space; @@ -1426,7 +1458,6 @@ dict_stats_analyze_index_below_cur( ulint* offsets1; ulint* offsets2; ulint* offsets_rec; - ib_uint64_t n_diff; /* the result */ ulint size; index = btr_cur_get_index(cur); @@ -1462,6 +1493,10 @@ dict_stats_analyze_index_below_cur( page_no = btr_node_ptr_get_child_page_no(rec, offsets_rec); + /* assume no external pages by default - in case we quit from this + function without analyzing any leaf pages */ + *n_external_pages = 0; + /* descend to the leaf level on the B-tree */ for (;;) { @@ -1480,20 +1515,24 @@ dict_stats_analyze_index_below_cur( /* search for the first non-boring record on the page */ offsets_rec = dict_stats_scan_page( &rec, offsets1, offsets2, index, page, n_prefix, - QUIT_ON_FIRST_NON_BORING, &n_diff); + QUIT_ON_FIRST_NON_BORING, n_diff, NULL); /* pages on level > 0 are not allowed to be empty */ ut_a(offsets_rec != NULL); /* if page is not empty (offsets_rec != NULL) then n_diff must be > 0, otherwise there is a bug in dict_stats_scan_page() */ - ut_a(n_diff > 0); + ut_a(*n_diff > 0); - if 
(n_diff == 1) { + if (*n_diff == 1) { /* page has all keys equal and the end of the page was reached by dict_stats_scan_page(), no need to descend to the leaf level */ mem_heap_free(heap); - return(1); + /* can't get an estimate for n_external_pages here + because we do not dive to the leaf level, assume no + external pages (*n_external_pages was assigned to 0 + above). */ + return; } /* else */ @@ -1501,7 +1540,7 @@ dict_stats_analyze_index_below_cur( first non-boring record it finds, then the returned n_diff can either be 0 (empty page), 1 (page has all keys equal) or 2 (non-boring record was found) */ - ut_a(n_diff == 2); + ut_a(*n_diff == 2); /* we have a non-boring record in rec, descend below it */ @@ -1512,11 +1551,14 @@ dict_stats_analyze_index_below_cur( ut_ad(btr_page_get_level(page, mtr) == 0); /* scan the leaf page and find the number of distinct keys, - when looking only at the first n_prefix columns */ + when looking only at the first n_prefix columns; also estimate + the number of externally stored pages pointed by records on this + page */ offsets_rec = dict_stats_scan_page( &rec, offsets1, offsets2, index, page, n_prefix, - COUNT_ALL_NON_BORING_AND_SKIP_DEL_MARKED, &n_diff); + COUNT_ALL_NON_BORING_AND_SKIP_DEL_MARKED, n_diff, + n_external_pages); #if 0 DEBUG_PRINTF(" %s(): n_diff below page_no=%lu: " UINT64PF "\n", @@ -1524,133 +1566,146 @@ dict_stats_analyze_index_below_cur( #endif mem_heap_free(heap); - - return(n_diff); } -/*********************************************************************//** -For a given level in an index select N_SAMPLE_PAGES(index) -(or less) records from that level and dive below them to the corresponding -leaf pages, then scan those leaf pages and save the sampling results in -index->stat_n_diff_key_vals[n_prefix - 1] and the number of pages scanned in -index->stat_n_sample_sizes[n_prefix - 1]. 
*/ +/** Input data that is used to calculate dict_index_t::stat_n_diff_key_vals[] +for each n-columns prefix (n from 1 to n_uniq). */ +struct n_diff_data_t { + /** Index of the level on which the descent through the btree + stopped. level 0 is the leaf level. This is >= 1 because we + avoid scanning the leaf level because it may contain too many + pages and doing so is useless when combined with the random dives - + if we are to scan the leaf level, this means a full scan and we can + simply do that instead of fiddling with picking random records higher + in the tree and to dive below them. At the start of the analyzing + we may decide to do full scan of the leaf level, but then this + structure is not used in that code path. */ + ulint level; + + /** Number of records on the level where the descend through the btree + stopped. When we scan the btree from the root, we stop at some mid + level, choose some records from it and dive below them towards a leaf + page to analyze. */ + ib_uint64_t n_recs_on_level; + + /** Number of different key values that were found on the mid level. */ + ib_uint64_t n_diff_on_level; + + /** Number of leaf pages that are analyzed. This is also the same as + the number of records that we pick from the mid level and dive below + them. */ + ib_uint64_t n_leaf_pages_to_analyze; + + /** Cumulative sum of the number of different key values that were + found on all analyzed pages. */ + ib_uint64_t n_diff_all_analyzed_pages; + + /** Cumulative sum of the number of external pages (stored outside of + the btree but in the same file segment). */ + ib_uint64_t n_external_pages_sum; +}; + +/** Estimate the number of different key values in an index when looking at +the first n_prefix columns. For a given level in an index select +n_diff_data->n_leaf_pages_to_analyze records from that level and dive below +them to the corresponding leaf pages, then scan those leaf pages and save the +sampling results in n_diff_data->n_diff_all_analyzed_pages. 
+@param[in] index index +@param[in] n_prefix look at first 'n_prefix' columns when +comparing records +@param[in] boundaries a vector that contains +n_diff_data->n_diff_on_level integers each of which represents the index (on +level 'level', counting from left/smallest to right/biggest from 0) of the +last record from each group of distinct keys +@param[in,out] n_diff_data n_diff_all_analyzed_pages and +n_external_pages_sum in this structure will be set by this function. The +members level, n_diff_on_level and n_leaf_pages_to_analyze must be set by the +caller in advance - they are used by some calculations inside this function +@param[in,out] mtr mini-transaction */ static void dict_stats_analyze_index_for_n_prefix( -/*==================================*/ - dict_index_t* index, /*!< in/out: index */ - ulint level, /*!< in: level, must be >= 1 */ - ib_uint64_t total_recs_on_level, - /*!< in: total number of - records on the given level */ - ulint n_prefix, /*!< in: look at first - n_prefix columns when - comparing records */ - ib_uint64_t n_diff_for_this_prefix, - /*!< in: number of distinct - records on the given level, - when looking at the first - n_prefix columns */ - boundaries_t* boundaries, /*!< in: array that contains - n_diff_for_this_prefix - integers each of which - represents the index (on the - level, counting from - left/smallest to right/biggest - from 0) of the last record - from each group of distinct - keys */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + dict_index_t* index, + ulint n_prefix, + const boundaries_t* boundaries, + n_diff_data_t* n_diff_data, + mtr_t* mtr) { btr_pcur_t pcur; const page_t* page; ib_uint64_t rec_idx; - ib_uint64_t last_idx_on_level; - ib_uint64_t n_recs_to_dive_below; - ib_uint64_t n_diff_sum_of_all_analyzed_pages; ib_uint64_t i; #if 0 DEBUG_PRINTF(" %s(table=%s, index=%s, level=%lu, n_prefix=%lu, " - "n_diff_for_this_prefix=" UINT64PF ")\n", + "n_diff_on_level=" UINT64PF ")\n", __func__, index->table->name, 
index->name, level, - n_prefix, n_diff_for_this_prefix); + n_prefix, n_diff_data->n_diff_on_level); #endif ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index), MTR_MEMO_S_LOCK)); - /* if some of those is 0 then this means that there is exactly one - page in the B-tree and it is empty and we should have done full scan - and should not be here */ - ut_ad(total_recs_on_level > 0); - ut_ad(n_diff_for_this_prefix > 0); - - /* this must be at least 1 */ - ut_ad(N_SAMPLE_PAGES(index) > 0); - /* Position pcur on the leftmost record on the leftmost page on the desired level. */ btr_pcur_open_at_index_side( true, index, BTR_SEARCH_LEAF | BTR_ALREADY_S_LATCHED, - &pcur, true, level, mtr); + &pcur, true, n_diff_data->level, mtr); btr_pcur_move_to_next_on_page(&pcur); page = btr_pcur_get_page(&pcur); + const rec_t* first_rec = btr_pcur_get_rec(&pcur); + + /* We shouldn't be scanning the leaf level. The caller of this function + should have stopped the descend on level 1 or higher. */ + ut_ad(n_diff_data->level > 0); + ut_ad(!page_is_leaf(page)); + /* The page must not be empty, except when it is the root page (and the whole index is empty). 
*/ - ut_ad(btr_pcur_is_on_user_rec(&pcur) || page_is_leaf(page)); - ut_ad(btr_pcur_get_rec(&pcur) - == page_rec_get_next_const(page_get_infimum_rec(page))); + ut_ad(btr_pcur_is_on_user_rec(&pcur)); + ut_ad(first_rec == page_rec_get_next_const(page_get_infimum_rec(page))); /* check that we are indeed on the desired level */ - ut_a(btr_page_get_level(page, mtr) == level); + ut_a(btr_page_get_level(page, mtr) == n_diff_data->level); /* there should not be any pages on the left */ ut_a(btr_page_get_prev(page, mtr) == FIL_NULL); /* check whether the first record on the leftmost page is marked - as such, if we are on a non-leaf level */ - ut_a((level == 0) - == !(REC_INFO_MIN_REC_FLAG & rec_get_info_bits( - btr_pcur_get_rec(&pcur), page_is_comp(page)))); + as such; we are on a non-leaf level */ + ut_a(rec_get_info_bits(first_rec, page_is_comp(page)) + & REC_INFO_MIN_REC_FLAG); - last_idx_on_level = boundaries->at( - static_cast<unsigned int>(n_diff_for_this_prefix - 1)); + const ib_uint64_t last_idx_on_level = boundaries->at( + static_cast<unsigned>(n_diff_data->n_diff_on_level - 1)); rec_idx = 0; - n_diff_sum_of_all_analyzed_pages = 0; - - n_recs_to_dive_below = ut_min(N_SAMPLE_PAGES(index), - n_diff_for_this_prefix); - - for (i = 0; i < n_recs_to_dive_below; i++) { - ib_uint64_t left; - ib_uint64_t right; - ib_uint64_t rnd; - ib_uint64_t dive_below_idx; + n_diff_data->n_diff_all_analyzed_pages = 0; + n_diff_data->n_external_pages_sum = 0; - /* there are n_diff_for_this_prefix elements + for (i = 0; i < n_diff_data->n_leaf_pages_to_analyze; i++) { + /* there are n_diff_on_level elements in 'boundaries' and we divide those elements - into n_recs_to_dive_below segments, for example: + into n_leaf_pages_to_analyze segments, for example: - let n_diff_for_this_prefix=100, n_recs_to_dive_below=4, then: + let n_diff_on_level=100, n_leaf_pages_to_analyze=4, then: segment i=0: [0, 24] segment i=1: [25, 49] segment i=2: [50, 74] segment i=3: [75, 99] or - let 
n_diff_for_this_prefix=1, n_recs_to_dive_below=1, then: + let n_diff_on_level=1, n_leaf_pages_to_analyze=1, then: segment i=0: [0, 0] or - let n_diff_for_this_prefix=2, n_recs_to_dive_below=2, then: + let n_diff_on_level=2, n_leaf_pages_to_analyze=2, then: segment i=0: [0, 0] segment i=1: [1, 1] or - let n_diff_for_this_prefix=13, n_recs_to_dive_below=7, then: + let n_diff_on_level=13, n_leaf_pages_to_analyze=7, then: segment i=0: [0, 0] segment i=1: [1, 2] segment i=2: [3, 4] @@ -1661,9 +1716,12 @@ dict_stats_analyze_index_for_n_prefix( then we select a random record from each segment and dive below it */ - left = n_diff_for_this_prefix * i / n_recs_to_dive_below; - right = n_diff_for_this_prefix * (i + 1) - / n_recs_to_dive_below - 1; + const ib_uint64_t n_diff = n_diff_data->n_diff_on_level; + const ib_uint64_t n_pick + = n_diff_data->n_leaf_pages_to_analyze; + + const ib_uint64_t left = n_diff * i / n_pick; + const ib_uint64_t right = n_diff * (i + 1) / n_pick - 1; ut_a(left <= right); ut_a(right <= last_idx_on_level); @@ -1671,11 +1729,11 @@ dict_stats_analyze_index_for_n_prefix( /* we do not pass (left, right) because we do not want to ask ut_rnd_interval() to work with too big numbers since ib_uint64_t could be bigger than ulint */ - rnd = static_cast<ib_uint64_t>( - ut_rnd_interval(0, static_cast<ulint>(right - left))); + const ulint rnd = ut_rnd_interval( + 0, static_cast<ulint>(right - left)); - dive_below_idx = boundaries->at( - static_cast<unsigned int>(left + rnd)); + const ib_uint64_t dive_below_idx + = boundaries->at(static_cast<unsigned>(left + rnd)); #if 0 DEBUG_PRINTF(" %s(): dive below record with index=" @@ -1711,9 +1769,13 @@ dict_stats_analyze_index_for_n_prefix( ut_a(rec_idx == dive_below_idx); ib_uint64_t n_diff_on_leaf_page; + ib_uint64_t n_external_pages; - n_diff_on_leaf_page = dict_stats_analyze_index_below_cur( - btr_pcur_get_btr_cur(&pcur), n_prefix, mtr); + dict_stats_analyze_index_below_cur(btr_pcur_get_btr_cur(&pcur), + n_prefix, + 
&n_diff_on_leaf_page, + &n_external_pages, + mtr); /* We adjust n_diff_on_leaf_page here to avoid counting one record twice - once as the last on some page and once @@ -1733,37 +1795,86 @@ dict_stats_analyze_index_for_n_prefix( n_diff_on_leaf_page--; } - n_diff_sum_of_all_analyzed_pages += n_diff_on_leaf_page; - } - - /* n_diff_sum_of_all_analyzed_pages can be 0 here if all the leaf - pages sampled contained only delete-marked records. In this case - we should assign 0 to index->stat_n_diff_key_vals[n_prefix - 1], which - the formula below does. */ + n_diff_data->n_diff_all_analyzed_pages += n_diff_on_leaf_page; - /* See REF01 for an explanation of the algorithm */ - index->stat_n_diff_key_vals[n_prefix - 1] - = index->stat_n_leaf_pages - - * n_diff_for_this_prefix - / total_recs_on_level - - * n_diff_sum_of_all_analyzed_pages - / n_recs_to_dive_below; + n_diff_data->n_external_pages_sum += n_external_pages; + } - index->stat_n_sample_sizes[n_prefix - 1] = n_recs_to_dive_below; + btr_pcur_close(&pcur); +} - DEBUG_PRINTF(" %s(): n_diff=" UINT64PF " for n_prefix=%lu " - "(%lu" - " * " UINT64PF " / " UINT64PF - " * " UINT64PF " / " UINT64PF ")\n", - __func__, index->stat_n_diff_key_vals[n_prefix - 1], - n_prefix, - index->stat_n_leaf_pages, - n_diff_for_this_prefix, total_recs_on_level, - n_diff_sum_of_all_analyzed_pages, n_recs_to_dive_below); +/** Set dict_index_t::stat_n_diff_key_vals[] and stat_n_sample_sizes[]. +@param[in] n_diff_data input data to use to derive the results +@param[in,out] index index whose stat_n_diff_key_vals[] to set */ +UNIV_INLINE +void +dict_stats_index_set_n_diff( + const n_diff_data_t* n_diff_data, + dict_index_t* index) +{ + for (ulint n_prefix = dict_index_get_n_unique(index); + n_prefix >= 1; + n_prefix--) { + /* n_diff_all_analyzed_pages can be 0 here if + all the leaf pages sampled contained only + delete-marked records. In this case we should assign + 0 to index->stat_n_diff_key_vals[n_prefix - 1], which + the formula below does. 
*/ + + const n_diff_data_t* data = &n_diff_data[n_prefix - 1]; + + ut_ad(data->n_leaf_pages_to_analyze > 0); + ut_ad(data->n_recs_on_level > 0); + + ulint n_ordinary_leaf_pages; + + if (data->level == 1) { + /* If we know the number of records on level 1, then + this number is the same as the number of pages on + level 0 (leaf). */ + n_ordinary_leaf_pages = data->n_recs_on_level; + } else { + /* If we analyzed D ordinary leaf pages and found E + external pages in total linked from those D ordinary + leaf pages, then this means that the ratio + ordinary/external is D/E. Then the ratio ordinary/total + is D / (D + E). Knowing that the total number of pages + is T (including ordinary and external) then we estimate + that the total number of ordinary leaf pages is + T * D / (D + E). */ + n_ordinary_leaf_pages + = index->stat_n_leaf_pages + * data->n_leaf_pages_to_analyze + / (data->n_leaf_pages_to_analyze + + data->n_external_pages_sum); + } - btr_pcur_close(&pcur); + /* See REF01 for an explanation of the algorithm */ + index->stat_n_diff_key_vals[n_prefix - 1] + = n_ordinary_leaf_pages + + * data->n_diff_on_level + / data->n_recs_on_level + + * data->n_diff_all_analyzed_pages + / data->n_leaf_pages_to_analyze; + + index->stat_n_sample_sizes[n_prefix - 1] + = data->n_leaf_pages_to_analyze; + + DEBUG_PRINTF(" %s(): n_diff=" UINT64PF " for n_prefix=%lu" + " (%lu" + " * " UINT64PF " / " UINT64PF + " * " UINT64PF " / " UINT64PF ")\n", + __func__, + index->stat_n_diff_key_vals[n_prefix - 1], + n_prefix, + index->stat_n_leaf_pages, + data->n_diff_on_level, + data->n_recs_on_level, + data->n_diff_all_analyzed_pages, + data->n_leaf_pages_to_analyze); + } } /*********************************************************************//** @@ -1781,10 +1892,8 @@ dict_stats_analyze_index( bool level_is_analyzed; ulint n_uniq; ulint n_prefix; - ib_uint64_t* n_diff_on_level; ib_uint64_t total_recs; ib_uint64_t total_pages; - boundaries_t* n_diff_boundaries; mtr_t mtr; ulint size; 
DBUG_ENTER("dict_stats_analyze_index"); @@ -1870,11 +1979,18 @@ dict_stats_analyze_index( DBUG_VOID_RETURN; } - /* set to zero */ - n_diff_on_level = reinterpret_cast<ib_uint64_t*> - (mem_zalloc(n_uniq * sizeof(ib_uint64_t))); + /* For each level that is being scanned in the btree, this contains the + number of different key values for all possible n-column prefixes. */ + ib_uint64_t* n_diff_on_level = new ib_uint64_t[n_uniq]; - n_diff_boundaries = new boundaries_t[n_uniq]; + /* For each level that is being scanned in the btree, this contains the + index of the last record from each group of equal records (when + comparing only the first n columns, n=1..n_uniq). */ + boundaries_t* n_diff_boundaries = new boundaries_t[n_uniq]; + + /* For each n-column prefix this array contains the input data that is + used to calculate dict_index_t::stat_n_diff_key_vals[]. */ + n_diff_data_t* n_diff_data = new n_diff_data_t[n_uniq]; /* total_recs is also used to estimate the number of pages on one level below, so at the start we have 1 page (the root) */ @@ -1986,12 +2102,12 @@ dict_stats_analyze_index( level_is_analyzed = true; - if (n_diff_on_level[n_prefix - 1] - >= N_DIFF_REQUIRED(index) - || level == 1) { - /* we found a good level with many distinct - records or we have reached the last level we - could scan */ + if (level == 1 + || n_diff_on_level[n_prefix - 1] + >= N_DIFF_REQUIRED(index)) { + /* we have reached the last level we could scan + or we found a good level with many distinct + records */ break; } @@ -2004,7 +2120,6 @@ found_level: " distinct records for n_prefix=%lu\n", __func__, level, n_diff_on_level[n_prefix - 1], n_prefix); - /* here we are either on level 1 or the level that we are on contains >= N_DIFF_REQUIRED distinct keys or we did not scan deeper levels because they would contain too many pages */ @@ -2013,20 +2128,47 @@ found_level: ut_ad(level_is_analyzed); + /* if any of these is 0 then there is exactly one page in the + B-tree and it is empty and we 
should have done full scan and + should not be here */ + ut_ad(total_recs > 0); + ut_ad(n_diff_on_level[n_prefix - 1] > 0); + + ut_ad(N_SAMPLE_PAGES(index) > 0); + + n_diff_data_t* data = &n_diff_data[n_prefix - 1]; + + data->level = level; + + data->n_recs_on_level = total_recs; + + data->n_diff_on_level = n_diff_on_level[n_prefix - 1]; + + data->n_leaf_pages_to_analyze = std::min( + N_SAMPLE_PAGES(index), + n_diff_on_level[n_prefix - 1]); + /* pick some records from this level and dive below them for the given n_prefix */ dict_stats_analyze_index_for_n_prefix( - index, level, total_recs, n_prefix, - n_diff_on_level[n_prefix - 1], - &n_diff_boundaries[n_prefix - 1], &mtr); + index, n_prefix, &n_diff_boundaries[n_prefix - 1], + data, &mtr); } mtr_commit(&mtr); delete[] n_diff_boundaries; - mem_free(n_diff_on_level); + delete[] n_diff_on_level; + + /* n_prefix == 0 means that the above loop did not end up prematurely + due to tree being changed and so n_diff_data[] is set up. */ + if (n_prefix == 0) { + dict_stats_index_set_n_diff(n_diff_data, index); + } + + delete[] n_diff_data; dict_stats_assert_initialized_index(index); DBUG_VOID_RETURN; @@ -2201,17 +2343,21 @@ dict_stats_save_index_stat( "END;", trx); if (ret != DB_SUCCESS) { - char buf_table[MAX_FULL_NAME_LEN]; - char buf_index[MAX_FULL_NAME_LEN]; - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Cannot save index statistics for table " - "%s, index %s, stat name \"%s\": %s\n", - ut_format_name(index->table->name, TRUE, - buf_table, sizeof(buf_table)), - ut_format_name(index->name, FALSE, - buf_index, sizeof(buf_index)), - stat_name, ut_strerr(ret)); + if (innodb_index_stats_not_found == false && + index->stats_error_printed == false) { + char buf_table[MAX_FULL_NAME_LEN]; + char buf_index[MAX_FULL_NAME_LEN]; + ut_print_timestamp(stderr); + fprintf(stderr, + " InnoDB: Cannot save index statistics for table " + "%s, index %s, stat name \"%s\": %s\n", + ut_format_name(index->table->name, TRUE, + 
buf_table, sizeof(buf_table)), + ut_format_name(index->name, FALSE, + buf_index, sizeof(buf_index)), + stat_name, ut_strerr(ret)); + index->stats_error_printed = true; + } } return(ret); @@ -2900,20 +3046,24 @@ dict_stats_update_for_index( } /* else */ - /* Fall back to transient stats since the persistent - storage is not present or is corrupted */ - char buf_table[MAX_FULL_NAME_LEN]; - char buf_index[MAX_FULL_NAME_LEN]; - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Recalculation of persistent statistics " - "requested for table %s index %s but the required " - "persistent statistics storage is not present or is " - "corrupted. Using transient stats instead.\n", - ut_format_name(index->table->name, TRUE, - buf_table, sizeof(buf_table)), - ut_format_name(index->name, FALSE, - buf_index, sizeof(buf_index))); + if (innodb_index_stats_not_found == false && + index->stats_error_printed == false) { + /* Fall back to transient stats since the persistent + storage is not present or is corrupted */ + char buf_table[MAX_FULL_NAME_LEN]; + char buf_index[MAX_FULL_NAME_LEN]; + ut_print_timestamp(stderr); + fprintf(stderr, + " InnoDB: Recalculation of persistent statistics " + "requested for table %s index %s but the required " + "persistent statistics storage is not present or is " + "corrupted. Using transient stats instead.\n", + ut_format_name(index->table->name, TRUE, + buf_table, sizeof(buf_table)), + ut_format_name(index->name, FALSE, + buf_index, sizeof(buf_index))); + index->stats_error_printed = false; + } } dict_table_stats_lock(index->table, RW_X_LATCH); @@ -2998,13 +3148,17 @@ dict_stats_update( /* Fall back to transient stats since the persistent storage is not present or is corrupted */ - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Recalculation of persistent statistics " - "requested for table %s but the required persistent " - "statistics storage is not present or is corrupted. 
" - "Using transient stats instead.\n", - ut_format_name(table->name, TRUE, buf, sizeof(buf))); + if (innodb_table_stats_not_found == false && + table->stats_error_printed == false) { + ut_print_timestamp(stderr); + fprintf(stderr, + " InnoDB: Recalculation of persistent statistics " + "requested for table %s but the required persistent " + "statistics storage is not present or is corrupted. " + "Using transient stats instead.\n", + ut_format_name(table->name, TRUE, buf, sizeof(buf))); + table->stats_error_printed = true; + } goto transient; @@ -3048,17 +3202,21 @@ dict_stats_update( /* persistent statistics storage does not exist or is corrupted, calculate the transient stats */ - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Error: Fetch of persistent " - "statistics requested for table %s but the " - "required system tables %s and %s are not " - "present or have unexpected structure. " - "Using transient stats instead.\n", - ut_format_name(table->name, TRUE, - buf, sizeof(buf)), - TABLE_STATS_NAME_PRINT, - INDEX_STATS_NAME_PRINT); + if (innodb_table_stats_not_found == false && + table->stats_error_printed == false) { + ut_print_timestamp(stderr); + fprintf(stderr, + " InnoDB: Error: Fetch of persistent " + "statistics requested for table %s but the " + "required system tables %s and %s are not " + "present or have unexpected structure. " + "Using transient stats instead.\n", + ut_format_name(table->name, TRUE, + buf, sizeof(buf)), + TABLE_STATS_NAME_PRINT, + INDEX_STATS_NAME_PRINT); + table->stats_error_printed = true; + } goto transient; } @@ -3128,16 +3286,19 @@ dict_stats_update( dict_stats_table_clone_free(t); - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Error fetching persistent statistics " - "for table %s from %s and %s: %s. 
" - "Using transient stats method instead.\n", - ut_format_name(table->name, TRUE, buf, - sizeof(buf)), - TABLE_STATS_NAME, - INDEX_STATS_NAME, - ut_strerr(err)); + if (innodb_table_stats_not_found == false && + table->stats_error_printed == false) { + ut_print_timestamp(stderr); + fprintf(stderr, + " InnoDB: Error fetching persistent statistics " + "for table %s from %s and %s: %s. " + "Using transient stats method instead.\n", + ut_format_name(table->name, TRUE, buf, + sizeof(buf)), + TABLE_STATS_NAME, + INDEX_STATS_NAME, + ut_strerr(err)); + } goto transient; } diff --git a/storage/xtradb/fil/fil0fil.cc b/storage/xtradb/fil/fil0fil.cc index 486a931242b..74d5b51d230 100644 --- a/storage/xtradb/fil/fil0fil.cc +++ b/storage/xtradb/fil/fil0fil.cc @@ -761,7 +761,7 @@ fil_node_open_file( fprintf(stderr, "InnoDB: Error: the size of single-table" " tablespace file %s\n" - "InnoDB: is only "UINT64PF"," + "InnoDB: is only " UINT64PF "," " should be at least %lu!\n", node->name, size_bytes, @@ -1815,6 +1815,9 @@ fil_close_all_files(void) { fil_space_t* space; + if (srv_track_changed_pages && srv_redo_log_thread_started) + os_event_wait(srv_redo_log_tracked_event); + mutex_enter(&fil_system->mutex); space = UT_LIST_GET_FIRST(fil_system->space_list); @@ -1851,6 +1854,9 @@ fil_close_log_files( { fil_space_t* space; + if (srv_track_changed_pages && srv_redo_log_thread_started) + os_event_wait(srv_redo_log_tracked_event); + mutex_enter(&fil_system->mutex); space = UT_LIST_GET_FIRST(fil_system->space_list); @@ -5543,7 +5549,7 @@ _fil_io( ulint mode; fil_space_t* space; fil_node_t* node; - ibool ret; + ibool ret=TRUE; ulint is_log; ulint wake_later; os_offset_t offset; @@ -5767,7 +5773,6 @@ _fil_io( offset, len); } #endif /* !UNIV_HOTBACKUP */ - ut_a(ret); if (mode == OS_AIO_SYNC) { /* The i/o operation is already completed when we return from @@ -5782,7 +5787,11 @@ _fil_io( ut_ad(fil_validate_skip()); } - return(DB_SUCCESS); + if (!ret) { + return(DB_OUT_OF_FILE_SPACE); + } else 
{ + return(DB_SUCCESS); + } } #ifndef UNIV_HOTBACKUP diff --git a/storage/xtradb/fts/fts0fts.cc b/storage/xtradb/fts/fts0fts.cc index 795f08da966..a19df8b39da 100644 --- a/storage/xtradb/fts/fts0fts.cc +++ b/storage/xtradb/fts/fts0fts.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2011, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2011, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -44,6 +44,13 @@ Full Text Search interface /** Column name from the FTS config table */ #define FTS_MAX_CACHE_SIZE_IN_MB "cache_size_in_mb" +/** Verify if a aux table name is a obsolete table +by looking up the key word in the obsolete table names */ +#define FTS_IS_OBSOLETE_AUX_TABLE(table_name) \ + (strstr((table_name), "DOC_ID") != NULL \ + || strstr((table_name), "ADDED") != NULL \ + || strstr((table_name), "STOPWORDS") != NULL) + /** This is maximum FTS cache for each table and would be a configurable variable */ UNIV_INTERN ulong fts_max_cache_size; @@ -5837,6 +5844,12 @@ fts_is_aux_table_name( } } + /* Could be obsolete common tables. */ + if (strncmp(ptr, "ADDED", len) == 0 + || strncmp(ptr, "STOPWORDS", len) == 0) { + return(true); + } + /* Try and read the index id. */ if (!fts_read_object_id(&table->index_id, ptr)) { return(FALSE); @@ -6433,6 +6446,56 @@ fts_check_and_drop_orphaned_tables( mem_free(path); } + } else { + if (FTS_IS_OBSOLETE_AUX_TABLE(aux_table->name)) { + + /* Current table could be one of the three + obsolete tables, in this case, we should + always try to drop it but not rename it. + This could happen when we try to upgrade + from older server to later one, which doesn't + contain these obsolete tables. 
*/ + drop = true; + + dberr_t err; + trx_t* trx_drop = + trx_allocate_for_background(); + + trx_drop->op_info = "Drop obsolete aux tables"; + trx_drop->dict_operation_lock_mode = RW_X_LATCH; + + trx_start_for_ddl(trx_drop, TRX_DICT_OP_TABLE); + + err = row_drop_table_for_mysql( + aux_table->name, trx_drop, false, true); + + trx_drop->dict_operation_lock_mode = 0; + + if (err != DB_SUCCESS) { + /* We don't need to worry about the + failure, since server would try to + drop it on next restart, even if + the table was broken. */ + + ib_logf(IB_LOG_LEVEL_WARN, + "Fail to drop obsolete aux" + " table '%s', which is" + " harmless. will try to drop" + " it on next restart.", + aux_table->name); + + fts_sql_rollback(trx_drop); + } else { + ib_logf(IB_LOG_LEVEL_INFO, + "Dropped obsolete aux" + " table '%s'.", + aux_table->name); + + fts_sql_commit(trx_drop); + } + + trx_free_for_background(trx_drop); + } } #ifdef _WIN32 if (!drop && rename) { diff --git a/storage/xtradb/fts/fts0opt.cc b/storage/xtradb/fts/fts0opt.cc index a9f3a25530d..910a00cd521 100644 --- a/storage/xtradb/fts/fts0opt.cc +++ b/storage/xtradb/fts/fts0opt.cc @@ -95,7 +95,7 @@ enum fts_msg_type_t { /** Compressed list of words that have been read from FTS INDEX that needs to be optimized. */ struct fts_zip_t { - ulint status; /*!< Status of (un)/zip operation */ + lint status; /*!< Status of (un)/zip operation */ ulint n_words; /*!< Number of words compressed */ diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index cdcadb90d64..66e9ae1f7b5 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -4,6 +4,7 @@ Copyright (c) 2000, 2014, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, 2009 Google Inc. Copyright (c) 2009, Percona Inc. Copyright (c) 2012, Facebook Inc. +Copyright (c) 2013, 2014 SkySQL Ab. All Rights Reserved. Portions of this file contain modifications contributed and copyrighted by Google, Inc. 
Those modifications are gratefully acknowledged and are described @@ -97,6 +98,7 @@ this program; if not, write to the Free Software Foundation, Inc., #include "fts0types.h" #include "row0import.h" #include "row0quiesce.h" +#include "row0mysql.h" #ifdef UNIV_DEBUG #include "trx0purge.h" #endif /* UNIV_DEBUG */ @@ -489,7 +491,7 @@ static PSI_rwlock_info all_innodb_rwlocks[] = { {&trx_purge_latch_key, "trx_purge_latch", 0}, {&index_tree_rw_lock_key, "index_tree_rw_lock", 0}, {&index_online_log_key, "index_online_log", 0}, - {&dict_table_stats_latch_key, "dict_table_stats", 0}, + {&dict_table_stats_key, "dict_table_stats", 0}, {&hash_table_rw_lock_key, "hash_table_locks", 0} }; # endif /* UNIV_PFS_RWLOCK */ @@ -1215,6 +1217,22 @@ innobase_start_trx_and_assign_read_view( THD* thd); /* in: MySQL thread handle of the user for whom the transaction should be committed */ +/*****************************************************************//** +Creates an InnoDB transaction struct for the thd if it does not yet have one. +Starts a new InnoDB transaction if a transaction is not yet started. And +clones snapshot for a consistent read from another session, if it has one. +@return 0 */ +static +int +innobase_start_trx_and_clone_read_view( +/*====================================*/ + handlerton* hton, /* in: Innodb handlerton */ + THD* thd, /* in: MySQL thread handle of the + user for whom the transaction should + be committed */ + THD* from_thd); /* in: MySQL thread handle of the + user session from which the consistent + read should be cloned */ /****************************************************************//** Flushes InnoDB logs to disk and makes a checkpoint. Really, a commit flushes the logs, and the name of this function should be innobase_checkpoint. 
@@ -3895,6 +3913,14 @@ innobase_end( if (innodb_inited) { + THD *thd= current_thd; + if (thd) { // may be UNINSTALL PLUGIN statement + trx_t* trx = thd_to_trx(thd); + if (trx) { + trx_free_for_mysql(trx); + } + } + srv_fast_shutdown = (ulint) innobase_fast_shutdown; innodb_inited = 0; @@ -4188,6 +4214,102 @@ innobase_commit_ordered( } /*****************************************************************//** +Creates an InnoDB transaction struct for the thd if it does not yet have one. +Starts a new InnoDB transaction if a transaction is not yet started. And +clones snapshot for a consistent read from another session, if it has one. +@return 0 */ +static +int +innobase_start_trx_and_clone_read_view( +/*====================================*/ + handlerton* hton, /* in: Innodb handlerton */ + THD* thd, /* in: MySQL thread handle of the + user for whom the transaction should + be committed */ + THD* from_thd) /* in: MySQL thread handle of the + user session from which the consistent + read should be cloned */ +{ + trx_t* trx; + trx_t* from_trx; + + DBUG_ENTER("innobase_start_trx_and_clone_read_view"); + DBUG_ASSERT(hton == innodb_hton_ptr); + + /* Get transaction handle from the donor session */ + + from_trx = thd_to_trx(from_thd); + + if (!from_trx) { + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + HA_ERR_UNSUPPORTED, + "InnoDB: WITH CONSISTENT SNAPSHOT " + "FROM SESSION was ignored because the " + "specified session does not have an open " + "transaction inside InnoDB."); + + DBUG_RETURN(0); + } + + /* Create a new trx struct for thd, if it does not yet have one */ + + trx = check_trx_exists(thd); + + /* This is just to play safe: release a possible FIFO ticket and + search latch. Since we can potentially reserve the trx_sys->mutex, + we have to release the search system latch first to obey the latching + order. 
*/ + + trx_search_latch_release_if_reserved(trx); + + innobase_srv_conc_force_exit_innodb(trx); + + /* If the transaction is not started yet, start it */ + + trx_start_if_not_started_xa(trx); + + /* Clone the read view from the donor transaction. Do this only if + transaction is using REPEATABLE READ isolation level. */ + trx->isolation_level = innobase_map_isolation_level( + thd_get_trx_isolation(thd)); + + if (trx->isolation_level != TRX_ISO_REPEATABLE_READ) { + + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + HA_ERR_UNSUPPORTED, + "InnoDB: WITH CONSISTENT SNAPSHOT " + "was ignored because this phrase " + "can only be used with " + "REPEATABLE READ isolation level."); + } else { + + lock_mutex_enter(); + mutex_enter(&trx_sys->mutex); + trx_mutex_enter(from_trx); + + if (!trx_clone_read_view(trx, from_trx)) { + + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + HA_ERR_UNSUPPORTED, + "InnoDB: WITH CONSISTENT SNAPSHOT " + "FROM SESSION was ignored because " + "the target transaction has not been " + "assigned a read view."); + } + + trx_mutex_exit(from_trx); + mutex_exit(&trx_sys->mutex); + lock_mutex_exit(); + } + + /* Set the MySQL flag to mark that there is an active transaction */ + + innobase_register_trx(hton, current_thd, trx); + + DBUG_RETURN(0); +} + +/*****************************************************************//** Commits a transaction in an InnoDB database or marks an SQL statement ended. @return 0 */ @@ -4711,7 +4833,7 @@ innobase_close_connection( sql_print_warning( "MySQL is closing a connection that has an active " - "InnoDB transaction. "TRX_ID_FMT" row modifications " + "InnoDB transaction. " TRX_ID_FMT " row modifications " "will roll back.", trx->undo_no); } @@ -4802,18 +4924,27 @@ innobase_kill_connection( } wsrep_thd_UNLOCK(thd); #endif /* WITH_WSREP */ + trx = thd_to_trx(thd); - if (trx) - { - /* Cancel a pending lock request. 
*/ - lock_mutex_enter(); - trx_mutex_enter(trx); - if (trx->lock.wait_lock) - lock_cancel_waiting_and_release(trx->lock.wait_lock); - trx_mutex_exit(trx); - lock_mutex_exit(); - } + if (trx) { + THD *cur = current_thd; + THD *owner = trx->current_lock_mutex_owner; + + if (owner != cur) { + lock_mutex_enter(); + } + trx_mutex_enter(trx); + + /* Cancel a pending lock request. */ + if (trx->lock.wait_lock) + lock_cancel_waiting_and_release(trx->lock.wait_lock); + + trx_mutex_exit(trx); + if (owner != cur) { + lock_mutex_exit(); + } + } DBUG_VOID_RETURN; } @@ -4828,14 +4959,11 @@ handler::Table_flags ha_innobase::table_flags() const /*============================*/ { - THD *thd = ha_thd(); /* Need to use tx_isolation here since table flags is (also) called before prebuilt is inited. */ - ulong const tx_isolation = thd_tx_isolation(thd); + ulong const tx_isolation = thd_tx_isolation(ha_thd()); - if (tx_isolation <= ISO_READ_COMMITTED && - !(tx_isolation == ISO_READ_COMMITTED && - thd_rpl_is_parallel(thd))) { + if (tx_isolation <= ISO_READ_COMMITTED) { return(int_table_flags); } @@ -8337,7 +8465,7 @@ calc_row_difference( if (doc_id < prebuilt->table->fts->cache->next_doc_id) { fprintf(stderr, "InnoDB: FTS Doc ID must be larger than" - " "IB_ID_FMT" for table", + " " IB_ID_FMT " for table", innodb_table->fts->cache->next_doc_id - 1); ut_print_name(stderr, trx, @@ -8349,9 +8477,9 @@ calc_row_difference( - prebuilt->table->fts->cache->next_doc_id) >= FTS_DOC_ID_MAX_STEP) { fprintf(stderr, - "InnoDB: Doc ID "UINT64PF" is too" + "InnoDB: Doc ID " UINT64PF " is too" " big. 
Its difference with largest" - " Doc ID used "UINT64PF" cannot" + " Doc ID used " UINT64PF " cannot" " exceed or equal to %d\n", doc_id, prebuilt->table->fts->cache->next_doc_id - 1, @@ -9134,6 +9262,29 @@ ha_innobase::innobase_get_index( index = innobase_index_lookup(share, keynr); if (index) { + + if (!key || ut_strcmp(index->name, key->name) != 0) { + fprintf(stderr, "InnoDB: [Error] Index for key no %u" + " mysql name %s , InnoDB name %s for table %s\n", + keynr, key ? key->name : "NULL", + index->name, + prebuilt->table->name); + + for(ulint i=0; i < table->s->keys; i++) { + index = innobase_index_lookup(share, i); + key = table->key_info + keynr; + + if (index) { + + fprintf(stderr, "InnoDB: [Note] Index for key no %u" + " mysql name %s , InnoDB name %s for table %s\n", + keynr, key ? key->name : "NULL", + index->name, + prebuilt->table->name); + } + } + } + ut_a(ut_strcmp(index->name, key->name) == 0); } else { /* Can't find index with keynr in the translation @@ -13064,6 +13215,35 @@ ha_innobase::info_low( break; } + DBUG_EXECUTE_IF("ib_ha_innodb_stat_not_initialized", + index->table->stat_initialized = FALSE;); + + if (!ib_table->stat_initialized || + (index->table != ib_table || + !index->table->stat_initialized)) { + fprintf(stderr, + "InnoDB: Warning: Index %s points to table %s" + " and ib_table %s statistics is initialized %d " + " but index table %s initialized %d " + " mysql table is %s. Have you mixed " + "up .frm files from different " + "installations? 
" + "See " REFMAN + "innodb-troubleshooting.html\n", + index->name, + index->table->name, + ib_table->name, + ib_table->stat_initialized, + index->table->name, + index->table->stat_initialized, + table->s->table_name.str + ); + + /* This is better than + assert on below function */ + dict_stats_init(index->table); + } + rec_per_key = innodb_rec_per_key( index, j, stats.records); @@ -19333,6 +19513,11 @@ static MYSQL_SYSVAR_ULONG(saved_page_number_debug, NULL, innodb_save_page_no, 0, 0, UINT_MAX32, 0); #endif /* UNIV_DEBUG */ +static MYSQL_SYSVAR_UINT(simulate_comp_failures, srv_simulate_comp_failures, + PLUGIN_VAR_NOCMDARG, + "Simulate compression failures.", + NULL, NULL, 0, 0, 99, 0); + const char *corrupt_table_action_names[]= { "assert", /* 0 */ @@ -19563,6 +19748,7 @@ static struct st_mysql_sys_var* innobase_system_variables[]= { MYSQL_SYSVAR(fake_changes), MYSQL_SYSVAR(locking_fake_changes), MYSQL_SYSVAR(use_stacktrace), + MYSQL_SYSVAR(simulate_comp_failures), NULL }; @@ -19841,7 +20027,7 @@ ib_senderrf( va_start(args, code); - myf l; + myf l=0; switch(level) { case IB_LOG_LEVEL_INFO: diff --git a/storage/xtradb/handler/handler0alter.cc b/storage/xtradb/handler/handler0alter.cc index f65716aca0f..85b98f19ae0 100644 --- a/storage/xtradb/handler/handler0alter.cc +++ b/storage/xtradb/handler/handler0alter.cc @@ -3171,6 +3171,9 @@ error_handling: case DB_DUPLICATE_KEY: my_error(ER_DUP_KEY, MYF(0), "SYS_INDEXES"); break; + case DB_OUT_OF_FILE_SPACE: + my_error_innodb(error, table_name, user_table->flags); + break; default: my_error_innodb(error, table_name, user_table->flags); } diff --git a/storage/xtradb/include/btr0cur.h b/storage/xtradb/include/btr0cur.h index 8a35cb1a3da..4ed66e76fe0 100644 --- a/storage/xtradb/include/btr0cur.h +++ b/storage/xtradb/include/btr0cur.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2013, Oracle and/or its affiliates. All Rights Reserved. 
+Copyright (c) 1994, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -582,6 +582,17 @@ void btr_estimate_number_of_different_key_vals( /*======================================*/ dict_index_t* index); /*!< in: index */ + +/** Gets the externally stored size of a record, in units of a database page. +@param[in] rec record +@param[in] offsets array returned by rec_get_offsets() +@return externally stored part, in units of a database page */ + +ulint +btr_rec_get_externally_stored_len( + const rec_t* rec, + const ulint* offsets); + /*******************************************************************//** Marks non-updated off-page fields as disowned by this record. The ownership must be transferred to the updated record which is inserted elsewhere in the diff --git a/storage/xtradb/include/buf0buf.ic b/storage/xtradb/include/buf0buf.ic index c49061621f3..b0d8e03ecb9 100644 --- a/storage/xtradb/include/buf0buf.ic +++ b/storage/xtradb/include/buf0buf.ic @@ -662,6 +662,11 @@ buf_page_get_block( buf_page_t* bpage) /*!< in: control block, or NULL */ { if (bpage != NULL) { +#ifdef UNIV_DEBUG + buf_pool_t* buf_pool = buf_pool_from_bpage(bpage); + ut_ad(buf_page_hash_lock_held_s_or_x(buf_pool, bpage) + || mutex_own(&buf_pool->LRU_list_mutex)); +#endif ut_ad(buf_page_in_file(bpage)); if (buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE) { diff --git a/storage/xtradb/include/dict0dict.h b/storage/xtradb/include/dict0dict.h index 3e3fb9f896c..136f7b52aba 100644 --- a/storage/xtradb/include/dict0dict.h +++ b/storage/xtradb/include/dict0dict.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved. 
Copyright (c) 2012, Facebook Inc. This program is free software; you can redistribute it and/or modify it under @@ -43,6 +43,9 @@ Created 1/8/1996 Heikki Tuuri #include "trx0types.h" #include "row0types.h" +extern bool innodb_table_stats_not_found; +extern bool innodb_index_stats_not_found; + #ifndef UNIV_HOTBACKUP # include "sync0sync.h" # include "sync0rw.h" @@ -1435,6 +1438,28 @@ UNIV_INTERN void dict_mutex_exit_for_mysql(void); /*===========================*/ + +/** Create a dict_table_t's stats latch or delay for lazy creation. +This function is only called from either single threaded environment +or from a thread that has not shared the table object with other threads. +@param[in,out] table table whose stats latch to create +@param[in] enabled if false then the latch is disabled +and dict_table_stats_lock()/unlock() become noop on this table. */ + +void +dict_table_stats_latch_create( + dict_table_t* table, + bool enabled); + +/** Destroy a dict_table_t's stats latch. +This function is only called from either single threaded environment +or from a thread that has not shared the table object with other threads. +@param[in,out] table table whose stats latch to destroy */ + +void +dict_table_stats_latch_destroy( + dict_table_t* table); + /**********************************************************************//** Lock the appropriate latch to protect a given table's statistics. table->id is used to pick the corresponding latch from a global array of diff --git a/storage/xtradb/include/dict0mem.h b/storage/xtradb/include/dict0mem.h index 7c902ef2d71..f6133e258c0 100644 --- a/storage/xtradb/include/dict0mem.h +++ b/storage/xtradb/include/dict0mem.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. 
This program is free software; you can redistribute it and/or modify it under @@ -49,6 +49,7 @@ Created 1/8/1996 Heikki Tuuri #include "hash0hash.h" #include "trx0types.h" #include "fts0fts.h" +#include "os0once.h" /* Forward declaration. */ struct ib_rbt_t; @@ -641,6 +642,9 @@ struct dict_index_t{ ulint stat_n_leaf_pages; /*!< approximate number of leaf pages in the index tree */ + bool stats_error_printed; + /*!< has persistent statistics error printed + for this index ? */ /* @} */ prio_rw_lock_t lock; /*!< read-write lock protecting the upper levels of the index tree */ @@ -856,6 +860,10 @@ struct dict_table_t{ initialized in dict_table_add_to_cache() */ /** Statistics for query optimization */ /* @{ */ + + volatile os_once::state_t stats_latch_created; + /*!< Creation state of 'stats_latch'. */ + rw_lock_t* stats_latch; /*!< this latch protects: dict_table_t::stat_initialized dict_table_t::stat_n_rows (*) @@ -965,6 +973,9 @@ struct dict_table_t{ /*!< see BG_STAT_* above. Writes are covered by dict_sys->mutex. Dirty reads are possible. */ + bool stats_error_printed; + /*!< Has persistent stats error been + already printed for this table ? */ /* @} */ /*----------------------*/ /**!< The following fields are used by the diff --git a/storage/xtradb/include/lock0lock.h b/storage/xtradb/include/lock0lock.h index 633e4f6626b..cb95c58fe3c 100644 --- a/storage/xtradb/include/lock0lock.h +++ b/storage/xtradb/include/lock0lock.h @@ -291,7 +291,7 @@ lock_rec_insert_check_and_lock( inserted record maybe should inherit LOCK_GAP type locks from the successor record */ - __attribute__((nonnull, warn_unused_result)); + __attribute__((nonnull(2,3,4,6,7), warn_unused_result)); /*********************************************************************//** Checks if locks of other transactions prevent an immediate modify (update, delete mark, or delete unmark) of a clustered index record. 
If they do, diff --git a/storage/xtradb/include/os0file.h b/storage/xtradb/include/os0file.h index a75091589c6..1ab71d80829 100644 --- a/storage/xtradb/include/os0file.h +++ b/storage/xtradb/include/os0file.h @@ -168,8 +168,8 @@ enum os_file_create_t { #define OS_FILE_LOG 256 /* This can be ORed to type */ /* @} */ -#define OS_AIO_N_PENDING_IOS_PER_THREAD 256 /*!< Windows might be able to handle -more */ +#define OS_AIO_N_PENDING_IOS_PER_THREAD 32 /*!< Win NT does not allow more + than 64 */ /** Modes for aio operations @{ */ #define OS_AIO_NORMAL 21 /*!< Normal asynchronous i/o not for ibuf diff --git a/storage/xtradb/include/os0once.h b/storage/xtradb/include/os0once.h new file mode 100644 index 00000000000..a8bbaf1d2d4 --- /dev/null +++ b/storage/xtradb/include/os0once.h @@ -0,0 +1,125 @@ +/***************************************************************************** + +Copyright (c) 2014, Oracle and/or its affiliates. All Rights Reserved. + +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA + +*****************************************************************************/ + +/**************************************************//** +@file include/os0once.h +A class that aids executing a given function exactly once in a multi-threaded +environment. 
+ +Created Feb 20, 2014 Vasil Dimov +*******************************************************/ + +#ifndef os0once_h +#define os0once_h + +#include "univ.i" + +#include "os0sync.h" +#include "ut0ut.h" + +/** Execute a given function exactly once in a multi-threaded environment +or wait for the function to be executed by another thread. + +Example usage: +First the user must create a control variable of type os_once::state_t and +assign it os_once::NEVER_DONE. +Then the user must pass this variable, together with a function to be +executed to os_once::do_or_wait_for_done(). + +Multiple threads can call os_once::do_or_wait_for_done() simultaneously with +the same (os_once::state_t) control variable. The provided function will be +called exactly once and when os_once::do_or_wait_for_done() returns then this +function has completed execution, by this or another thread. In other words +os_once::do_or_wait_for_done() will either execute the provided function or +will wait for its execution to complete if it is already called by another +thread or will do nothing if the function has already completed its execution +earlier. + +This mimics pthread_once(3), but unfortunately pthread_once(3) does not +support passing arguments to the init_routine() function. We should use +std::call_once() when we start compiling with C++11 enabled. */ +class os_once { +public: + /** Control variables' state type */ + typedef ib_uint32_t state_t; + + /** Not yet executed. */ + static const state_t NEVER_DONE = 0; + + /** Currently being executed by this or another thread. */ + static const state_t IN_PROGRESS = 1; + + /** Finished execution. */ + static const state_t DONE = 2; + +#ifdef HAVE_ATOMIC_BUILTINS + /** Call a given function or wait for its execution to complete if it is + already called by another thread. + @param[in,out] state control variable + @param[in] do_func function to call + @param[in,out] do_func_arg an argument to pass to do_func(). 
*/ + static + void + do_or_wait_for_done( + volatile state_t* state, + void (*do_func)(void*), + void* do_func_arg) + { + /* Avoid calling os_compare_and_swap_uint32() in the most + common case. */ + if (*state == DONE) { + return; + } + + if (os_compare_and_swap_uint32(state, + NEVER_DONE, IN_PROGRESS)) { + /* We are the first. Call the function. */ + + do_func(do_func_arg); + + const bool swapped = os_compare_and_swap_uint32( + state, IN_PROGRESS, DONE); + + ut_a(swapped); + } else { + /* The state is not NEVER_DONE, so either it is + IN_PROGRESS (somebody is calling the function right + now or DONE (it has already been called and completed). + Wait for it to become DONE. */ + for (;;) { + const state_t s = *state; + + switch (s) { + case DONE: + return; + case IN_PROGRESS: + break; + case NEVER_DONE: + /* fall through */ + default: + ut_error; + } + + UT_RELAX_CPU(); + } + } + } +#endif /* HAVE_ATOMIC_BUILTINS */ +}; + +#endif /* os0once_h */ diff --git a/storage/xtradb/include/os0sync.h b/storage/xtradb/include/os0sync.h index ea5d09ec535..ba22c02a5a9 100644 --- a/storage/xtradb/include/os0sync.h +++ b/storage/xtradb/include/os0sync.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. 
Portions of this file contain modifications contributed and copyrighted by @@ -434,6 +434,9 @@ Returns the old value of *ptr, atomically sets *ptr to new_val */ # define os_atomic_test_and_set_ulint(ptr, new_val) \ __sync_lock_test_and_set(ptr, new_val) +# define os_atomic_lock_release_byte(ptr) \ + __sync_lock_release(ptr) + #elif defined(HAVE_IB_SOLARIS_ATOMICS) # define HAVE_ATOMIC_BUILTINS @@ -515,6 +518,9 @@ Returns the old value of *ptr, atomically sets *ptr to new_val */ # define os_atomic_test_and_set_ulint(ptr, new_val) \ atomic_swap_ulong(ptr, new_val) +# define os_atomic_lock_release_byte(ptr) \ + (void) atomic_swap_uchar(ptr, 0) + #elif defined(HAVE_WINDOWS_ATOMICS) # define HAVE_ATOMIC_BUILTINS @@ -574,7 +580,8 @@ Returns true if swapped, ptr is pointer to target, old_val is value to compare to, new_val is the value to swap in. */ # define os_compare_and_swap_uint32(ptr, old_val, new_val) \ - (win_cmp_and_xchg_dword(ptr, new_val, old_val) == old_val) + (InterlockedCompareExchange(reinterpret_cast<volatile long*>(ptr), \ + new_val, old_val) == old_val) # define os_compare_and_swap_ulint(ptr, old_val, new_val) \ (win_cmp_and_xchg_ulint(ptr, new_val, old_val) == old_val) @@ -637,6 +644,9 @@ clobbered */ # define os_atomic_test_and_set_ulong(ptr, new_val) \ InterlockedExchange(ptr, new_val) +# define os_atomic_lock_release_byte(ptr) \ + (void) InterlockedExchange(ptr, 0) + #else # define IB_ATOMICS_STARTUP_MSG \ "Mutexes and rw_locks use InnoDB's own implementation" diff --git a/storage/xtradb/include/read0read.h b/storage/xtradb/include/read0read.h index e17d49b1321..0352f129c30 100644 --- a/storage/xtradb/include/read0read.h +++ b/storage/xtradb/include/read0read.h @@ -50,6 +50,27 @@ read_view_open_now( NULL if a new one needs to be created */ /*********************************************************************//** +Clones a read view object. 
This function will allocate space for two read +views contiguously, one identical in size and content as @param view (starting +at returned pointer) and another view immediately following the trx_ids array. +The second view will have space for an extra trx_id_t element. +@return read view struct */ +UNIV_INTERN +read_view_t* +read_view_clone( +/*============*/ + const read_view_t* view, /*!< in: view to clone */ + read_view_t*& prebuilt_clone);/*!< in,out: prebuilt view or + NULL */ +/*********************************************************************//** +Insert the view in the proper order into the trx_sys->view_list. The +read view list is ordered by read_view_t::low_limit_no in descending order. */ +UNIV_INTERN +void +read_view_add( +/*==========*/ + read_view_t* view); /*!< in: view to add to */ +/*********************************************************************//** Makes a copy of the oldest existing read view, or opens a new. The view must be closed with ..._close. @return own: read view struct */ diff --git a/storage/xtradb/include/srv0srv.h b/storage/xtradb/include/srv0srv.h index 4c8a6f7b7b8..8c2f92adf3c 100644 --- a/storage/xtradb/include/srv0srv.h +++ b/storage/xtradb/include/srv0srv.h @@ -3,6 +3,7 @@ Copyright (c) 1995, 2013, Oracle and/or its affiliates. All rights reserved. Copyright (c) 2008, 2009, Google Inc. Copyright (c) 2009, Percona Inc. +Copyright (c) 2013, 2014, SkySQL Ab. All Rights Reserved. Portions of this file contain modifications contributed and copyrighted by Google, Inc. 
Those modifications are gratefully acknowledged and are described @@ -161,9 +162,12 @@ extern char srv_disable_sort_file_cache; thread */ extern os_event_t srv_checkpoint_completed_event; -/* This event is set on the online redo log following thread exit to signal -that the (slow) shutdown may proceed */ -extern os_event_t srv_redo_log_thread_finished_event; +/* This event is set on the online redo log following thread after a successful +log tracking iteration */ +extern os_event_t srv_redo_log_tracked_event; + +/** srv_redo_log_follow_thread spawn flag */ +extern bool srv_redo_log_thread_started; /* If the last data file is auto-extended, we add this many pages to it at a time */ @@ -591,6 +595,8 @@ extern srv_stats_t srv_stats; When FALSE, row locks are not taken at all. */ extern my_bool srv_fake_changes_locks; +/** Simulate compression failures. */ +extern uint srv_simulate_comp_failures; # ifdef UNIV_PFS_THREAD /* Keys to register InnoDB threads with performance schema */ diff --git a/storage/xtradb/include/sync0rw.h b/storage/xtradb/include/sync0rw.h index 95bb7e16b26..0ac6b0f3f69 100644 --- a/storage/xtradb/include/sync0rw.h +++ b/storage/xtradb/include/sync0rw.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. Portions of this file contain modifications contributed and copyrighted by @@ -109,14 +109,8 @@ extern ib_mutex_t rw_lock_list_mutex; #ifdef UNIV_SYNC_DEBUG /* The global mutex which protects debug info lists of all rw-locks. To modify the debug info list of an rw-lock, this mutex has to be - acquired in addition to the mutex protecting the lock. 
*/ -extern ib_mutex_t rw_lock_debug_mutex; -extern os_event_t rw_lock_debug_event; /*!< If deadlock detection does - not get immediately the mutex it - may wait for this event */ -extern ibool rw_lock_debug_waiters; /*!< This is set to TRUE, if - there may be waiters for the event */ +extern os_fast_mutex_t rw_lock_debug_mutex; #endif /* UNIV_SYNC_DEBUG */ /** Counters for RW locks. */ @@ -142,7 +136,7 @@ extern mysql_pfs_key_t trx_i_s_cache_lock_key; extern mysql_pfs_key_t trx_purge_latch_key; extern mysql_pfs_key_t index_tree_rw_lock_key; extern mysql_pfs_key_t index_online_log_key; -extern mysql_pfs_key_t dict_table_stats_latch_key; +extern mysql_pfs_key_t dict_table_stats_key; extern mysql_pfs_key_t trx_sys_rw_lock_key; extern mysql_pfs_key_t hash_table_rw_lock_key; #endif /* UNIV_PFS_RWLOCK */ diff --git a/storage/xtradb/include/sync0rw.ic b/storage/xtradb/include/sync0rw.ic index 3511987dbb0..d1675f0a9c9 100644 --- a/storage/xtradb/include/sync0rw.ic +++ b/storage/xtradb/include/sync0rw.ic @@ -916,8 +916,9 @@ pfs_rw_lock_x_lock_func( rw_lock_x_lock_func(lock, pass, file_name, line); - if (locker != NULL) + if (locker != NULL) { PSI_RWLOCK_CALL(end_rwlock_wrwait)(locker, 0); + } } else { @@ -1072,8 +1073,9 @@ pfs_rw_lock_s_lock_func( rw_lock_s_lock_func(lock, pass, file_name, line); - if (locker != NULL) + if (locker != NULL) { PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, 0); + } } else { diff --git a/storage/xtradb/include/sync0sync.ic b/storage/xtradb/include/sync0sync.ic index a302e1473a5..34541a98789 100644 --- a/storage/xtradb/include/sync0sync.ic +++ b/storage/xtradb/include/sync0sync.ic @@ -111,10 +111,7 @@ mutex_reset_lock_word( ib_mutex_t* mutex) /*!< in: mutex */ { #if defined(HAVE_ATOMIC_BUILTINS) - /* In theory __sync_lock_release should be used to release the lock. - Unfortunately, it does not work properly alone. The workaround is - that more conservative __sync_lock_test_and_set is used instead. 
*/ - os_atomic_test_and_set_byte(&mutex->lock_word, 0); + os_atomic_lock_release_byte(&mutex->lock_word); #else mutex->lock_word = 0; diff --git a/storage/xtradb/include/trx0trx.h b/storage/xtradb/include/trx0trx.h index be13c48fdfc..75325d73f4d 100644 --- a/storage/xtradb/include/trx0trx.h +++ b/storage/xtradb/include/trx0trx.h @@ -275,6 +275,17 @@ read_view_t* trx_assign_read_view( /*=================*/ trx_t* trx); /*!< in: active transaction */ +/********************************************************************//** +Clones the read view from another transaction. All the consistent reads within +the receiver transaction will get the same read view as the donor transaction +@return read view clone */ +UNIV_INTERN +read_view_t* +trx_clone_read_view( +/*================*/ + trx_t* trx, /*!< in: receiver transaction */ + trx_t* from_trx) /*!< in: donor transaction */ + __attribute__((nonnull, warn_unused_result)); /****************************************************************//** Prepares a transaction for commit/rollback. */ UNIV_INTERN @@ -1019,6 +1030,11 @@ struct trx_t{ count of tables being flushed. */ /*------------------------------*/ + THD* current_lock_mutex_owner; + /*!< If this is equal to current_thd, + then in innobase_kill_query() we know we + already hold the lock_sys->mutex. 
*/ + /*------------------------------*/ #ifdef UNIV_DEBUG ulint start_line; /*!< Track where it was started from */ const char* start_file; /*!< Filename where it was started */ diff --git a/storage/xtradb/include/univ.i b/storage/xtradb/include/univ.i index 572788f7242..a808e773db2 100644 --- a/storage/xtradb/include/univ.i +++ b/storage/xtradb/include/univ.i @@ -44,10 +44,10 @@ Created 1/20/1994 Heikki Tuuri #define INNODB_VERSION_MAJOR 5 #define INNODB_VERSION_MINOR 6 -#define INNODB_VERSION_BUGFIX 17 +#define INNODB_VERSION_BUGFIX 19 #ifndef PERCONA_INNODB_VERSION -#define PERCONA_INNODB_VERSION 65.0 +#define PERCONA_INNODB_VERSION 67.0 #endif /* Enable UNIV_LOG_ARCHIVE in XtraDB */ @@ -454,10 +454,10 @@ typedef unsigned __int64 ib_uint64_t; typedef unsigned __int32 ib_uint32_t; #else /* Use the integer types and formatting strings defined in the C99 standard. */ -# define UINT32PF "%"PRIu32 -# define INT64PF "%"PRId64 -# define UINT64PF "%"PRIu64 -# define UINT64PFx "%016"PRIx64 +# define UINT32PF "%" PRIu32 +# define INT64PF "%" PRId64 +# define UINT64PF "%" PRIu64 +# define UINT64PFx "%016" PRIx64 # define DBUG_LSN_PF UINT64PF typedef int64_t ib_int64_t; typedef uint64_t ib_uint64_t; diff --git a/storage/xtradb/lock/lock0lock.cc b/storage/xtradb/lock/lock0lock.cc index 4a633c1bcd2..97d4ed77c9c 100644 --- a/storage/xtradb/lock/lock0lock.cc +++ b/storage/xtradb/lock/lock0lock.cc @@ -49,6 +49,7 @@ Created 5/7/1996 Heikki Tuuri #include "btr0btr.h" #include "dict0boot.h" #include <set> +#include "mysql/plugin.h" #ifdef WITH_WSREP extern my_bool wsrep_debug; @@ -378,6 +379,11 @@ struct lock_stack_t { ulint heap_no; /*!< heap number if rec lock */ }; +extern "C" void thd_report_wait_for(const MYSQL_THD thd, MYSQL_THD other_thd); +extern "C" int thd_need_wait_for(const MYSQL_THD thd); +extern "C" +int thd_need_ordering_with(const MYSQL_THD thd, const MYSQL_THD other_thd); + /** Stack to use during DFS search. 
Currently only a single stack is required because there is no parallel deadlock check. This stack is protected by the lock_sys_t::mutex. */ @@ -393,6 +399,14 @@ UNIV_INTERN mysql_pfs_key_t lock_sys_mutex_key; UNIV_INTERN mysql_pfs_key_t lock_sys_wait_mutex_key; #endif /* UNIV_PFS_MUTEX */ +/* Buffer to collect THDs to report waits for. */ +struct thd_wait_reports { + struct thd_wait_reports *next; /*!< List link */ + ulint used; /*!< How many elements in waitees[] */ + trx_t *waitees[64]; /*!< Trxs for thd_report_wait_for() */ +}; + + #ifdef UNIV_DEBUG UNIV_INTERN ibool lock_print_waits = FALSE; @@ -1024,6 +1038,32 @@ lock_rec_has_to_wait( return(FALSE); } + if ((type_mode & LOCK_GAP || lock_rec_get_gap(lock2)) && + !thd_need_ordering_with(trx->mysql_thd, + lock2->trx->mysql_thd)) { + /* If the upper server layer has already decided on the + commit order between the transaction requesting the + lock and the transaction owning the lock, we do not + need to wait for gap locks. Such ordering by the upper + server layer happens in parallel replication, where the + commit order is fixed to match the original order on the + master. + + Such gap locks are mainly needed to get serialisability + between transactions so that they will be binlogged in + the correct order so that statement-based replication + will give the correct results. Since the right order + was already determined on the master, we do not need + to enforce it again here. + + Skipping the locks is not essential for correctness, + since in case of deadlock we will just kill the later + transaction and retry it. But it can save some + unnecessary rollbacks and retries. 
*/ + + return (FALSE); + } + #ifdef WITH_WSREP /* if BF thread is locking and has conflict with another BF thread, we need to look at trx ordering and lock types */ @@ -4094,7 +4134,8 @@ static trx_id_t lock_deadlock_search( /*=================*/ - lock_deadlock_ctx_t* ctx) /*!< in/out: deadlock context */ + lock_deadlock_ctx_t* ctx, /*!< in/out: deadlock context */ + struct thd_wait_reports*waitee_ptr) /*!< in/out: list of waitees */ { const lock_t* lock; ulint heap_no; @@ -4174,38 +4215,59 @@ lock_deadlock_search( /* Select the joining transaction as the victim. */ return(ctx->start->id); - } else if (lock->trx->lock.que_state == TRX_QUE_LOCK_WAIT) { + } else { + /* We do not need to report autoinc locks to the upper + layer. These locks are released before commit, so they + can not cause deadlocks with binlog-fixed commit + order. */ + if (waitee_ptr && + (lock_get_type_low(lock) != LOCK_TABLE || + lock_get_mode(lock) != LOCK_AUTO_INC)) { + if (waitee_ptr->used == + sizeof(waitee_ptr->waitees) / + sizeof(waitee_ptr->waitees[0])) { + waitee_ptr->next = + (struct thd_wait_reports *) + mem_alloc(sizeof(*waitee_ptr)); + waitee_ptr = waitee_ptr->next; + if (!waitee_ptr) { + ctx->too_deep = TRUE; + return(ctx->start->id); + } + waitee_ptr->next = NULL; + waitee_ptr->used = 0; + } + waitee_ptr->waitees[waitee_ptr->used++] = lock->trx; + } - /* Another trx ahead has requested a lock in an - incompatible mode, and is itself waiting for a lock. */ + if (lock->trx->lock.que_state == TRX_QUE_LOCK_WAIT) { - ++ctx->cost; + /* Another trx ahead has requested a lock in an + incompatible mode, and is itself waiting for a lock. */ - /* Save current search state. */ - if (!lock_deadlock_push(ctx, lock, heap_no)) { + ++ctx->cost; - /* Unable to save current search state, stack - size not big enough. */ + /* Save current search state. */ + if (!lock_deadlock_push(ctx, lock, heap_no)) { - ctx->too_deep = TRUE; + /* Unable to save current search state, stack + size not big enough. 
*/ + + ctx->too_deep = TRUE; -#ifdef WITH_WSREP - if (wsrep_thd_is_BF(ctx->start->mysql_thd, TRUE)) - return(lock->trx->id); - else -#endif /* WITH_WSREP */ return(ctx->start->id); - } + } - ctx->wait_lock = lock->trx->lock.wait_lock; - lock = lock_get_first_lock(ctx, &heap_no); + ctx->wait_lock = lock->trx->lock.wait_lock; + lock = lock_get_first_lock(ctx, &heap_no); - if (lock->trx->lock.deadlock_mark > ctx->mark_start) { + if (lock->trx->lock.deadlock_mark > ctx->mark_start) { + lock = lock_get_next_lock(ctx, lock, heap_no); + } + + } else { lock = lock_get_next_lock(ctx, lock, heap_no); } - - } else { - lock = lock_get_next_lock(ctx, lock, heap_no); } } @@ -4270,6 +4332,48 @@ lock_deadlock_trx_rollback( trx_mutex_exit(trx); } +static +void +lock_report_waiters_to_mysql( +/*=======================*/ + struct thd_wait_reports* waitee_buf_ptr, /*!< in: set of trxs */ + THD* mysql_thd, /*!< in: THD */ + trx_id_t victim_trx_id) /*!< in: Trx selected + as deadlock victim, if + any */ +{ + struct thd_wait_reports* p; + struct thd_wait_reports* q; + ulint i; + + p = waitee_buf_ptr; + while (p) { + i = 0; + while (i < p->used) { + trx_t *w_trx = p->waitees[i]; + /* There is no need to report waits to a trx already + selected as a victim. */ + if (w_trx->id != victim_trx_id) { + /* If thd_report_wait_for() decides to kill the + transaction, then we will get a call back into + innobase_kill_query. We mark this by setting + current_lock_mutex_owner, so we can avoid trying + to recursively take lock_sys->mutex. */ + w_trx->current_lock_mutex_owner = mysql_thd; + thd_report_wait_for(mysql_thd, w_trx->mysql_thd); + w_trx->current_lock_mutex_owner = NULL; + } + ++i; + } + q = p->next; + if (p != waitee_buf_ptr) { + mem_free(p); + } + p = q; + } +} + + /********************************************************************//** Checks if a joining lock request results in a deadlock. 
If a deadlock is found this function will resolve the dadlock by choosing a victim transaction @@ -4285,13 +4389,23 @@ lock_deadlock_check_and_resolve( const lock_t* lock, /*!< in: lock the transaction is requesting */ const trx_t* trx) /*!< in: transaction */ { - trx_id_t victim_trx_id; + trx_id_t victim_trx_id; + struct thd_wait_reports waitee_buf; + struct thd_wait_reports*waitee_buf_ptr; + THD* start_mysql_thd; ut_ad(trx != NULL); ut_ad(lock != NULL); ut_ad(lock_mutex_own()); assert_trx_in_list(trx); + start_mysql_thd = trx->mysql_thd; + if (start_mysql_thd && thd_need_wait_for(start_mysql_thd)) { + waitee_buf_ptr = &waitee_buf; + } else { + waitee_buf_ptr = NULL; + } + /* Try and resolve as many deadlocks as possible. */ do { lock_deadlock_ctx_t ctx; @@ -4304,7 +4418,19 @@ lock_deadlock_check_and_resolve( ctx.wait_lock = lock; ctx.mark_start = lock_mark_counter; - victim_trx_id = lock_deadlock_search(&ctx); + if (waitee_buf_ptr) { + waitee_buf_ptr->next = NULL; + waitee_buf_ptr->used = 0; + } + + victim_trx_id = lock_deadlock_search(&ctx, waitee_buf_ptr); + + /* Report waits to upper layer, as needed. */ + if (waitee_buf_ptr) { + lock_report_waiters_to_mysql(waitee_buf_ptr, + start_mysql_thd, + victim_trx_id); + } /* Search too deep, we rollback the joining transaction. */ if (ctx.too_deep) { diff --git a/storage/xtradb/log/log0log.cc b/storage/xtradb/log/log0log.cc index 7783fc1ac13..58ef40e468c 100644 --- a/storage/xtradb/log/log0log.cc +++ b/storage/xtradb/log/log0log.cc @@ -1950,6 +1950,7 @@ log_io_complete_checkpoint(void) /* Wake the redo log watching thread to parse the log up to this checkpoint. 
*/ if (srv_track_changed_pages) { + os_event_reset(srv_redo_log_tracked_event); os_event_set(srv_checkpoint_completed_event); } } @@ -3681,8 +3682,8 @@ loop: /* Wake the log tracking thread which will then immediatelly quit because of srv_shutdown_state value */ if (srv_track_changed_pages) { + os_event_reset(srv_redo_log_tracked_event); os_event_set(srv_checkpoint_completed_event); - os_event_wait(srv_redo_log_thread_finished_event); } fil_close_all_files(); @@ -3759,6 +3760,7 @@ loop: /* Signal the log following thread to quit */ if (srv_track_changed_pages) { + os_event_reset(srv_redo_log_tracked_event); os_event_set(srv_checkpoint_completed_event); } @@ -3786,10 +3788,6 @@ loop: fil_flush_file_spaces(FIL_TABLESPACE); } - if (srv_track_changed_pages) { - os_event_wait(srv_redo_log_thread_finished_event); - } - fil_close_all_files(); /* Make some checks that the server really is quiet */ diff --git a/storage/xtradb/os/os0file.cc b/storage/xtradb/os/os0file.cc index 565c5720766..9a999fe832f 100644 --- a/storage/xtradb/os/os0file.cc +++ b/storage/xtradb/os/os0file.cc @@ -2918,7 +2918,7 @@ try_again: } ib_logf(IB_LOG_LEVEL_ERROR, - "Tried to read "ULINTPF" bytes at offset " UINT64PF". " + "Tried to read " ULINTPF " bytes at offset " UINT64PF ". " "Was only able to read %ld.", n, offset, (lint) ret); #endif /* __WIN__ */ retry = os_file_handle_error(NULL, "read"); @@ -3072,7 +3072,8 @@ os_file_write_func( DWORD len; ulint n_retries = 0; ulint err; - OVERLAPPED overlapped; + OVERLAPPED overlapped; + DWORD saved_error = 0; /* On 64-bit Windows, ulint is 64 bits. But offset and n should be no more than 32 bits. 
*/ @@ -3099,7 +3100,7 @@ retry: if (ret) { ret = GetOverlappedResult(file, &overlapped, (DWORD *)&len, FALSE); } - else if(GetLastError() == ERROR_IO_PENDING) { + else if ( GetLastError() == ERROR_IO_PENDING) { ret = GetOverlappedResult(file, &overlapped, (DWORD *)&len, TRUE); } @@ -3127,8 +3128,10 @@ retry: } if (!os_has_said_disk_full) { + char *winmsg = NULL; - err = (ulint) GetLastError(); + saved_error = GetLastError(); + err = (ulint) saved_error; ut_print_timestamp(stderr); @@ -3145,6 +3148,23 @@ retry: name, offset, (ulong) n, (ulong) len, (ulong) err); + /* Ask Windows to prepare a standard message for a + GetLastError() */ + + FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | + FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, saved_error, + MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), + (LPSTR)&winmsg, 0, NULL); + + if (winmsg) { + fprintf(stderr, + "InnoDB: FormatMessage: Error number %lu means '%s'.\n", + (ulong) saved_error, winmsg); + LocalFree(winmsg); + } + if (strerror((int) err) != NULL) { fprintf(stderr, "InnoDB: Error number %lu means '%s'.\n", @@ -3178,7 +3198,7 @@ retry: fprintf(stderr, " InnoDB: Error: Write to file %s failed" - " at offset "UINT64PF".\n" + " at offset " UINT64PF ".\n" "InnoDB: %lu bytes should have been written," " only %ld were written.\n" "InnoDB: Operating system error number %lu.\n" @@ -4744,8 +4764,10 @@ os_aio_func( wake_later = mode & OS_AIO_SIMULATED_WAKE_LATER; mode = mode & (~OS_AIO_SIMULATED_WAKE_LATER); - if (mode == OS_AIO_SYNC) - { + DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28", + mode = OS_AIO_SYNC;); + + if (mode == OS_AIO_SYNC) { ibool ret; /* This is actually an ordinary synchronous read or write: no need to use an i/o-handler thread */ @@ -4759,7 +4781,18 @@ os_aio_func( ret = os_file_write(name, file, buf, offset, n); } - ut_a(ret); + + DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28", + os_has_said_disk_full = FALSE;); + DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28", + ret = 0;); + 
DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28", + errno = 28;); + + if (!ret) { + fprintf(stderr, "FAIL"); + } + return ret; } @@ -5588,7 +5621,13 @@ consecutive_loop: aio_slot->offset, total_len); } - ut_a(ret); + DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28_2", + os_has_said_disk_full = FALSE;); + DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28_2", + ret = 0;); + DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28_2", + errno = 28;); + srv_set_io_thread_op_info(global_segment, "file i/o done"); if (aio_slot->type == OS_FILE_READ && n_consecutive > 1) { diff --git a/storage/xtradb/os/os0stacktrace.cc b/storage/xtradb/os/os0stacktrace.cc index 4d52e625057..f7fb1212e5f 100644 --- a/storage/xtradb/os/os0stacktrace.cc +++ b/storage/xtradb/os/os0stacktrace.cc @@ -85,16 +85,16 @@ os_stacktrace_print( caller_address = (void*) uc->uc_mcontext.gregs[REG_RIP] ; #elif defined(__hppa__) ucontext_t* uc = (ucontext_t*) ucontext; - caller_address = (void*) uc->uc_mcontext.sc_iaoq[0] & ~0×3UL ; + caller_address = (void*) uc->uc_mcontext.sc_iaoq[0] & ~0x3UL ; #elif (defined (__ppc__)) || (defined (__powerpc__)) ucontext_t* uc = (ucontext_t*) ucontext; caller_address = (void*) uc->uc_mcontext.regs->nip ; #elif defined(__sparc__) struct sigcontext* sc = (struct sigcontext*) ucontext; #if __WORDSIZE == 64 - caller_address = (void*) scp->sigc_regs.tpc ; + caller_address = (void*) sc->sigc_regs.tpc ; #else - pnt = (void*) scp->si_regs.pc ; + caller_address = (void*) sc->si_regs.pc ; #endif #elif defined(__i386__) ucontext_t* uc = (ucontext_t*) ucontext; diff --git a/storage/xtradb/page/page0zip.cc b/storage/xtradb/page/page0zip.cc index 1e533a4bbbe..c4c8354aa1e 100644 --- a/storage/xtradb/page/page0zip.cc +++ b/storage/xtradb/page/page0zip.cc @@ -2,6 +2,7 @@ Copyright (c) 2005, 2014, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. +Copyright (c) 2014, SkySQL Ab. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1313,6 +1314,30 @@ page_zip_compress( MONITOR_INC(MONITOR_PAGE_COMPRESS); + /* Simulate a compression failure with a probability determined by + innodb_simulate_comp_failures, only if the page has 2 or more + records. */ + + if (srv_simulate_comp_failures + && !dict_index_is_ibuf(index) + && page_get_n_recs(page) >= 2 + && ((ulint)(rand() % 100) < srv_simulate_comp_failures) + && strcasecmp(index->table_name, "IBUF_DUMMY") != 0) { + +#ifdef UNIV_DEBUG + fprintf(stderr, + "InnoDB: Simulating a compression failure" + " for table %s, index %s, page %lu (%s)\n", + index->table_name, + index->name, + page_get_page_no(page), + page_is_leaf(page) ? "leaf" : "non-leaf"); + +#endif + + goto err_exit; + } + heap = mem_heap_create(page_zip_get_size(page_zip) + n_fields * (2 + sizeof(ulint)) + REC_OFFS_HEADER_SIZE diff --git a/storage/xtradb/read/read0read.cc b/storage/xtradb/read/read0read.cc index 887e1717769..c350e24dbb0 100644 --- a/storage/xtradb/read/read0read.cc +++ b/storage/xtradb/read/read0read.cc @@ -221,7 +221,7 @@ views contiguously, one identical in size and content as @param view (starting at returned pointer) and another view immediately following the trx_ids array. The second view will have space for an extra trx_id_t element. @return read view struct */ -UNIV_INLINE +UNIV_INTERN read_view_t* read_view_clone( /*============*/ @@ -256,7 +256,7 @@ read_view_clone( /*********************************************************************//** Insert the view in the proper order into the trx_sys->view_list. The read view list is ordered by read_view_t::low_limit_no in descending order. 
*/ -static +UNIV_INTERN void read_view_add( /*==========*/ diff --git a/storage/xtradb/row/row0ins.cc b/storage/xtradb/row/row0ins.cc index 444fac87842..ecbf9425f43 100644 --- a/storage/xtradb/row/row0ins.cc +++ b/storage/xtradb/row/row0ins.cc @@ -151,35 +151,37 @@ row_ins_alloc_sys_fields( ut_ad(row && table && heap); ut_ad(dtuple_get_n_fields(row) == dict_table_get_n_cols(table)); - /* 1. Allocate buffer for row id */ + /* allocate buffer to hold the needed system created hidden columns. */ + uint len = DATA_ROW_ID_LEN + DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN; + ptr = static_cast<byte*>(mem_heap_zalloc(heap, len)); + /* 1. Populate row-id */ col = dict_table_get_sys_col(table, DATA_ROW_ID); dfield = dtuple_get_nth_field(row, dict_col_get_no(col)); - ptr = static_cast<byte*>(mem_heap_zalloc(heap, DATA_ROW_ID_LEN)); - dfield_set_data(dfield, ptr, DATA_ROW_ID_LEN); node->row_id_buf = ptr; - /* 3. Allocate buffer for trx id */ + ptr += DATA_ROW_ID_LEN; + /* 2. Populate trx id */ col = dict_table_get_sys_col(table, DATA_TRX_ID); dfield = dtuple_get_nth_field(row, dict_col_get_no(col)); - ptr = static_cast<byte*>(mem_heap_zalloc(heap, DATA_TRX_ID_LEN)); dfield_set_data(dfield, ptr, DATA_TRX_ID_LEN); node->trx_id_buf = ptr; - /* 4. Allocate buffer for roll ptr */ + ptr += DATA_TRX_ID_LEN; + + /* 3. 
Populate roll ptr */ col = dict_table_get_sys_col(table, DATA_ROLL_PTR); dfield = dtuple_get_nth_field(row, dict_col_get_no(col)); - ptr = static_cast<byte*>(mem_heap_zalloc(heap, DATA_ROLL_PTR_LEN)); dfield_set_data(dfield, ptr, DATA_ROLL_PTR_LEN); } diff --git a/storage/xtradb/row/row0merge.cc b/storage/xtradb/row/row0merge.cc index 80cda9078ff..e074604e3cb 100644 --- a/storage/xtradb/row/row0merge.cc +++ b/storage/xtradb/row/row0merge.cc @@ -786,7 +786,7 @@ row_merge_read( if (UNIV_UNLIKELY(!success)) { ut_print_timestamp(stderr); fprintf(stderr, - " InnoDB: failed to read merge block at "UINT64PF"\n", + " InnoDB: failed to read merge block at " UINT64PF "\n", ofs); } diff --git a/storage/xtradb/row/row0mysql.cc b/storage/xtradb/row/row0mysql.cc index c65c39b7971..671714c5b3d 100644 --- a/storage/xtradb/row/row0mysql.cc +++ b/storage/xtradb/row/row0mysql.cc @@ -1358,7 +1358,7 @@ error_exit: if (doc_id < next_doc_id) { fprintf(stderr, "InnoDB: FTS Doc ID must be large than" - " "UINT64PF" for table", + " " UINT64PF " for table", next_doc_id - 1); ut_print_name(stderr, trx, TRUE, table->name); putc('\n', stderr); @@ -1373,9 +1373,9 @@ error_exit: if (doc_id - next_doc_id >= FTS_DOC_ID_MAX_STEP) { fprintf(stderr, - "InnoDB: Doc ID "UINT64PF" is too" + "InnoDB: Doc ID " UINT64PF " is too" " big. Its difference with largest" - " used Doc ID "UINT64PF" cannot" + " used Doc ID " UINT64PF " cannot" " exceed or equal to %d\n", doc_id, next_doc_id - 1, FTS_DOC_ID_MAX_STEP); diff --git a/storage/xtradb/row/row0sel.cc b/storage/xtradb/row/row0sel.cc index 67107c34204..fd50e2240b5 100644 --- a/storage/xtradb/row/row0sel.cc +++ b/storage/xtradb/row/row0sel.cc @@ -878,16 +878,15 @@ row_sel_get_clust_rec( if (!node->read_view) { /* Try to place a lock on the index record */ - - /* If innodb_locks_unsafe_for_binlog option is used - or this session is using READ COMMITTED isolation level - we lock only the record, i.e., next-key locking is - not used. 
*/ ulint lock_type; trx_t* trx; trx = thr_get_trx(thr); + /* If innodb_locks_unsafe_for_binlog option is used + or this session is using READ COMMITTED or lower isolation level + we lock only the record, i.e., next-key locking is + not used. */ if (srv_locks_unsafe_for_binlog || trx->isolation_level <= TRX_ISO_READ_COMMITTED) { lock_type = LOCK_REC_NOT_GAP; @@ -1505,12 +1504,6 @@ rec_loop: search result set, resulting in the phantom problem. */ if (!consistent_read) { - - /* If innodb_locks_unsafe_for_binlog option is used - or this session is using READ COMMITTED isolation - level, we lock only the record, i.e., next-key - locking is not used. */ - rec_t* next_rec = page_rec_get_next(rec); ulint lock_type; trx_t* trx; @@ -1520,6 +1513,10 @@ rec_loop: offsets = rec_get_offsets(next_rec, index, offsets, ULINT_UNDEFINED, &heap); + /* If innodb_locks_unsafe_for_binlog option is used + or this session is using READ COMMITTED or lower isolation + level, we lock only the record, i.e., next-key + locking is not used. */ if (srv_locks_unsafe_for_binlog || trx->isolation_level <= TRX_ISO_READ_COMMITTED) { @@ -1568,12 +1565,6 @@ skip_lock: if (!consistent_read) { /* Try to place a lock on the index record */ - - /* If innodb_locks_unsafe_for_binlog option is used - or this session is using READ COMMITTED isolation level, - we lock only the record, i.e., next-key locking is - not used. */ - ulint lock_type; trx_t* trx; @@ -1582,6 +1573,10 @@ skip_lock: trx = thr_get_trx(thr); + /* If innodb_locks_unsafe_for_binlog option is used + or this session is using READ COMMITTED or lower isolation level, + we lock only the record, i.e., next-key locking is + not used. 
*/ if (srv_locks_unsafe_for_binlog || trx->isolation_level <= TRX_ISO_READ_COMMITTED) { @@ -4228,7 +4223,7 @@ rec_loop: /* Try to place a lock on the index record */ /* If innodb_locks_unsafe_for_binlog option is used - or this session is using a READ COMMITTED isolation + or this session is using a READ COMMITTED or lower isolation level we do not lock gaps. Supremum record is really a gap and therefore we do not set locks there. */ @@ -4379,7 +4374,7 @@ wrong_offs: /* Try to place a gap lock on the index record only if innodb_locks_unsafe_for_binlog option is not set or this session is not - using a READ COMMITTED isolation level. */ + using a READ COMMITTED or lower isolation level. */ err = sel_set_rec_lock( btr_pcur_get_block(pcur), @@ -4428,7 +4423,7 @@ wrong_offs: /* Try to place a gap lock on the index record only if innodb_locks_unsafe_for_binlog option is not set or this session is not - using a READ COMMITTED isolation level. */ + using a READ COMMITTED or lower isolation level. */ err = sel_set_rec_lock( btr_pcur_get_block(pcur), diff --git a/storage/xtradb/srv/srv0mon.cc b/storage/xtradb/srv/srv0mon.cc index ea346566e57..64417b1e5fb 100644 --- a/storage/xtradb/srv/srv0mon.cc +++ b/storage/xtradb/srv/srv0mon.cc @@ -41,8 +41,8 @@ Created 12/9/2009 Jimmy Yang /* Macro to standardize the counter names for counters in the "monitor_buf_page" module as they have very structured defines */ #define MONITOR_BUF_PAGE(name, description, code, op, op_code) \ - {"buffer_page_"op"_"name, "buffer_page_io", \ - "Number of "description" Pages "op, \ + {"buffer_page_" op "_" name, "buffer_page_io", \ + "Number of " description " Pages " op, \ MONITOR_GROUP_MODULE, MONITOR_DEFAULT_START, \ MONITOR_##code##_##op_code} diff --git a/storage/xtradb/srv/srv0srv.cc b/storage/xtradb/srv/srv0srv.cc index 6665504e868..e8343b4c620 100644 --- a/storage/xtradb/srv/srv0srv.cc +++ b/storage/xtradb/srv/srv0srv.cc @@ -3,6 +3,7 @@ Copyright (c) 1995, 2013, Oracle and/or its affiliates. 
All Rights Reserved. Copyright (c) 2008, 2009 Google Inc. Copyright (c) 2009, Percona Inc. +Copyright (c) 2013, 2014, SkySQL Ab. All Rights Reserved. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -633,6 +634,9 @@ current_time % 5 != 0. */ ? thd_lock_wait_timeout((trx)->mysql_thd) \ : 0) +/** Simulate compression failures. */ +UNIV_INTERN uint srv_simulate_comp_failures = 0; + /* IMPLEMENTATION OF THE SERVER MAIN PROGRAM ========================================= @@ -760,7 +764,9 @@ static const ulint SRV_MASTER_SLOT = 0; UNIV_INTERN os_event_t srv_checkpoint_completed_event; -UNIV_INTERN os_event_t srv_redo_log_thread_finished_event; +UNIV_INTERN os_event_t srv_redo_log_tracked_event; + +UNIV_INTERN bool srv_redo_log_thread_started = false; /*********************************************************************//** Prints counters for work done by srv_master_thread. */ @@ -1114,7 +1120,10 @@ srv_init(void) srv_checkpoint_completed_event = os_event_create(); - srv_redo_log_thread_finished_event = os_event_create(); + if (srv_track_changed_pages) { + srv_redo_log_tracked_event = os_event_create(); + os_event_set(srv_redo_log_tracked_event); + } UT_LIST_INIT(srv_sys->tasks); } @@ -2334,6 +2343,7 @@ DECLARE_THREAD(srv_redo_log_follow_thread)( #endif my_thread_init(); + srv_redo_log_thread_started = true; do { os_event_wait(srv_checkpoint_completed_event); @@ -2353,13 +2363,15 @@ DECLARE_THREAD(srv_redo_log_follow_thread)( "stopping log tracking thread!\n"); break; } + os_event_set(srv_redo_log_tracked_event); } } while (srv_shutdown_state < SRV_SHUTDOWN_LAST_PHASE); srv_track_changed_pages = FALSE; log_online_read_shutdown(); - os_event_set(srv_redo_log_thread_finished_event); + os_event_set(srv_redo_log_tracked_event); + srv_redo_log_thread_started = false; /* Defensive, not required */ my_thread_end(); os_thread_exit(NULL); diff --git 
a/storage/xtradb/srv/srv0start.cc b/storage/xtradb/srv/srv0start.cc index c8fbbd74344..f2a511520de 100644 --- a/storage/xtradb/srv/srv0start.cc +++ b/storage/xtradb/srv/srv0start.cc @@ -2261,9 +2261,9 @@ innobase_start_or_create_for_mysql(void) } else if (size != srv_log_file_size) { ib_logf(IB_LOG_LEVEL_ERROR, "Log file %s is" - " of different size "UINT64PF" bytes" + " of different size " UINT64PF " bytes" " than other log" - " files "UINT64PF" bytes!", + " files " UINT64PF " bytes!", logfilename, size << UNIV_PAGE_SIZE_SHIFT, (os_offset_t) srv_log_file_size diff --git a/storage/xtradb/sync/sync0arr.cc b/storage/xtradb/sync/sync0arr.cc index 126cf8de0d5..95ecf496e82 100644 --- a/storage/xtradb/sync/sync0arr.cc +++ b/storage/xtradb/sync/sync0arr.cc @@ -2,6 +2,7 @@ Copyright (c) 1995, 2013, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. +Copyright (c) 2013, 2014, SkySQL Ab. All Rights Reserved. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -183,6 +184,33 @@ sync_array_get_nth_cell( } /******************************************************************//** +Looks for a cell with the given thread id. +@return pointer to cell or NULL if not found */ +static +sync_cell_t* +sync_array_find_thread( +/*===================*/ + sync_array_t* arr, /*!< in: wait array */ + os_thread_id_t thread) /*!< in: thread id */ +{ + ulint i; + sync_cell_t* cell; + + for (i = 0; i < arr->n_cells; i++) { + + cell = sync_array_get_nth_cell(arr, i); + + if (cell->wait_object != NULL + && os_thread_eq(cell->thread, thread)) { + + return(cell); /* Found */ + } + } + + return(NULL); /* Not found */ +} + +/******************************************************************//** Reserves the mutex semaphore protecting a sync array. 
*/ static void @@ -441,8 +469,10 @@ static void sync_array_cell_print( /*==================*/ - FILE* file, /*!< in: file where to print */ - sync_cell_t* cell) /*!< in: sync cell */ + FILE* file, /*!< in: file where to print */ + sync_cell_t* cell, /*!< in: sync cell */ + os_thread_id_t* reserver) /*!< out: write reserver or + 0 */ { ib_mutex_t* mutex; ib_prio_mutex_t* prio_mutex; @@ -460,16 +490,9 @@ sync_array_cell_print( innobase_basename(cell->file), (ulong) cell->line, difftime(time(NULL), cell->reservation_time)); - /* If stacktrace feature is enabled we will send a SIGUSR2 - signal to thread waiting for the semaphore. Signal handler - will then dump the current stack to error log. */ - if (srv_use_stacktrace) { -#ifdef __linux__ - pthread_kill(cell->thread, SIGUSR2); -#endif - } if (type == SYNC_MUTEX || type == SYNC_PRIO_MUTEX) { + /* We use old_wait_mutex in case the cell has already been freed meanwhile */ if (type == SYNC_MUTEX) { @@ -483,18 +506,29 @@ sync_array_cell_print( } - fprintf(file, - "Mutex at %p '%s', lock var %lu\n" + if (mutex) { + fprintf(file, + "Mutex at %p '%s', lock var %lu\n" #ifdef UNIV_SYNC_DEBUG - "Last time reserved in file %s line %lu, " + "Last time reserved in file %s line %lu, " #endif /* UNIV_SYNC_DEBUG */ - "waiters flag %lu\n", - (void*) mutex, mutex->cmutex_name, - (ulong) mutex->lock_word, + "waiters flag %lu\n", + (void*) mutex, mutex->cmutex_name, + (ulong) mutex->lock_word, #ifdef UNIV_SYNC_DEBUG - mutex->file_name, (ulong) mutex->line, + mutex->file_name, (ulong) mutex->line, #endif /* UNIV_SYNC_DEBUG */ - (ulong) mutex->waiters); + (ulong) mutex->waiters); + } + + /* If stacktrace feature is enabled we will send a SIGUSR2 + signal to thread waiting for the semaphore. Signal handler + will then dump the current stack to error log. 
*/ + if (srv_use_stacktrace && cell && cell->thread) { +#ifdef __linux__ + pthread_kill(cell->thread, SIGUSR2); +#endif + } if (type == SYNC_PRIO_MUTEX) { @@ -529,40 +563,47 @@ sync_array_cell_print( rwlock = &prio_rwlock->base_lock; } - fprintf(file, - " RW-latch at %p '%s'\n", - (void*) rwlock, rwlock->lock_name); - writer = rw_lock_get_writer(rwlock); - if (writer != RW_LOCK_NOT_LOCKED) { + if (rwlock) { fprintf(file, - "a writer (thread id %lu) has" - " reserved it in mode %s", - (ulong) os_thread_pf(rwlock->writer_thread), - writer == RW_LOCK_EX - ? " exclusive\n" - : " wait exclusive\n"); - } + " RW-latch at %p '%s'\n", + (void*) rwlock, rwlock->lock_name); - fprintf(file, - "number of readers %lu, waiters flag %lu, " - "lock_word: %lx\n" - "Last time read locked in file %s line %lu\n" - "Last time write locked in file %s line %lu\n", - (ulong) rw_lock_get_reader_count(rwlock), - (ulong) rwlock->waiters, - rwlock->lock_word, - innobase_basename(rwlock->last_s_file_name), - (ulong) rwlock->last_s_line, - rwlock->last_x_file_name, - (ulong) rwlock->last_x_line); + writer = rw_lock_get_writer(rwlock); - /* If stacktrace feature is enabled we will send a SIGUSR2 - signal to thread that has locked RW-latch with write mode. - Signal handler will then dump the current stack to error log. */ - if (writer != RW_LOCK_NOT_LOCKED && srv_use_stacktrace) { + if (writer && writer != RW_LOCK_NOT_LOCKED) { + fprintf(file, + "a writer (thread id %lu) has" + " reserved it in mode %s", + (ulong) os_thread_pf(rwlock->writer_thread), + writer == RW_LOCK_EX + ? 
" exclusive\n" + : " wait exclusive\n"); + + *reserver = rwlock->writer_thread; + } + + fprintf(file, + "number of readers %lu, waiters flag %lu, " + "lock_word: %lx\n" + "Last time read locked in file %s line %lu\n" + "Last time write locked in file %s line %lu\n", + (ulong) rw_lock_get_reader_count(rwlock), + (ulong) rwlock->waiters, + rwlock->lock_word, + innobase_basename(rwlock->last_s_file_name), + (ulong) rwlock->last_s_line, + rwlock->last_x_file_name, + (ulong) rwlock->last_x_line); + + /* If stacktrace feature is enabled we will send a SIGUSR2 + signal to thread that has locked RW-latch with write mode. + Signal handler will then dump the current stack to error log. */ + if (writer != RW_LOCK_NOT_LOCKED && srv_use_stacktrace && + rwlock && rwlock->writer_thread) { #ifdef __linux__ - pthread_kill(rwlock->writer_thread, SIGUSR2); + pthread_kill(rwlock->writer_thread, SIGUSR2); #endif + } } if (prio_rwlock) { @@ -584,32 +625,6 @@ sync_array_cell_print( } #ifdef UNIV_SYNC_DEBUG -/******************************************************************//** -Looks for a cell with the given thread id. -@return pointer to cell or NULL if not found */ -static -sync_cell_t* -sync_array_find_thread( -/*===================*/ - sync_array_t* arr, /*!< in: wait array */ - os_thread_id_t thread) /*!< in: thread id */ -{ - ulint i; - sync_cell_t* cell; - - for (i = 0; i < arr->n_cells; i++) { - - cell = sync_array_get_nth_cell(arr, i); - - if (cell->wait_object != NULL - && os_thread_eq(cell->thread, thread)) { - - return(cell); /* Found */ - } - } - - return(NULL); /* Not found */ -} /******************************************************************//** Recursion step for deadlock detection. 
@@ -671,6 +686,7 @@ sync_array_detect_deadlock( os_thread_id_t thread; ibool ret; rw_lock_debug_t*debug; + os_thread_id_t r = 0; ut_a(arr); ut_a(start); @@ -715,7 +731,7 @@ sync_array_detect_deadlock( "Mutex %p owned by thread %lu file %s line %lu\n", mutex, (ulong) os_thread_pf(mutex->thread_id), mutex->file_name, (ulong) mutex->line); - sync_array_cell_print(stderr, cell); + sync_array_cell_print(stderr, cell, &r); return(TRUE); } @@ -754,7 +770,7 @@ sync_array_detect_deadlock( print: fprintf(stderr, "rw-lock %p ", (void*) lock); - sync_array_cell_print(stderr, cell); + sync_array_cell_print(stderr, cell, &r); rw_lock_debug_print(stderr, debug); return(TRUE); } @@ -1009,6 +1025,7 @@ sync_array_print_long_waits_low( double diff; sync_cell_t* cell; void* wait_object; + os_thread_id_t reserver=0; cell = sync_array_get_nth_cell(arr, i); @@ -1024,7 +1041,7 @@ sync_array_print_long_waits_low( if (diff > SYNC_ARRAY_TIMEOUT) { fputs("InnoDB: Warning: a long semaphore wait:\n", stderr); - sync_array_cell_print(stderr, cell); + sync_array_cell_print(stderr, cell, &reserver); *noticed = TRUE; } @@ -1039,6 +1056,57 @@ sync_array_print_long_waits_low( } } + /* We found a long semaphore wait, wait all threads that are + waiting for a semaphore. 
*/ + if (*noticed) { + for (i = 0; i < arr->n_cells; i++) { + void* wait_object; + sync_cell_t* cell; + os_thread_id_t reserver=(os_thread_id_t)ULINT_UNDEFINED; + ulint loop=0; + + cell = sync_array_get_nth_cell(arr, i); + + wait_object = cell->wait_object; + + if (wait_object == NULL || !cell->waiting) { + + continue; + } + + fputs("InnoDB: Warning: semaphore wait:\n", + stderr); + sync_array_cell_print(stderr, cell, &reserver); + + /* Try to output cell information for writer recursive way */ + while (reserver != (os_thread_id_t)ULINT_UNDEFINED) { + sync_cell_t* reserver_wait; + + reserver_wait = sync_array_find_thread(arr, reserver); + + if (reserver_wait && + reserver_wait->wait_object != NULL && + reserver_wait->waiting) { + fputs("InnoDB: Warning: Writer thread is waiting this semaphore:\n", + stderr); + sync_array_cell_print(stderr, reserver_wait, &reserver); + + if (reserver_wait->thread == reserver) { + reserver = (os_thread_id_t)ULINT_UNDEFINED; + } + } else { + reserver = (os_thread_id_t)ULINT_UNDEFINED; + } + + /* This is protection against loop */ + if (loop > 100) { + fputs("InnoDB: Warning: Too many waiting threads.\n", stderr); + break; + } + } + } + } + #undef SYNC_ARRAY_TIMEOUT return(fatal); @@ -1125,12 +1193,13 @@ sync_array_print_info_low( for (i = 0; count < arr->n_reserved; ++i) { sync_cell_t* cell; + os_thread_id_t r = 0; cell = sync_array_get_nth_cell(arr, i); if (cell->wait_object != NULL) { count++; - sync_array_cell_print(file, cell); + sync_array_cell_print(file, cell, &r); } } } diff --git a/storage/xtradb/sync/sync0rw.cc b/storage/xtradb/sync/sync0rw.cc index 2ff75b55cf6..79741e3cdce 100644 --- a/storage/xtradb/sync/sync0rw.cc +++ b/storage/xtradb/sync/sync0rw.cc @@ -151,18 +151,12 @@ UNIV_INTERN mysql_pfs_key_t rw_lock_mutex_key; To modify the debug info list of an rw-lock, this mutex has to be acquired in addition to the mutex protecting the lock. 
*/ -UNIV_INTERN ib_mutex_t rw_lock_debug_mutex; +UNIV_INTERN os_fast_mutex_t rw_lock_debug_mutex; # ifdef UNIV_PFS_MUTEX UNIV_INTERN mysql_pfs_key_t rw_lock_debug_mutex_key; # endif -/* If deadlock detection does not get immediately the mutex, -it may wait for this event */ -UNIV_INTERN os_event_t rw_lock_debug_event; -/* This is set to TRUE, if there may be waiters for the event */ -UNIV_INTERN ibool rw_lock_debug_waiters; - /******************************************************************//** Creates a debug info struct. */ static @@ -920,22 +914,7 @@ void rw_lock_debug_mutex_enter(void) /*===========================*/ { -loop: - if (0 == mutex_enter_nowait(&rw_lock_debug_mutex)) { - return; - } - - os_event_reset(rw_lock_debug_event); - - rw_lock_debug_waiters = TRUE; - - if (0 == mutex_enter_nowait(&rw_lock_debug_mutex)) { - return; - } - - os_event_wait(rw_lock_debug_event); - - goto loop; + os_fast_mutex_lock(&rw_lock_debug_mutex); } /******************************************************************//** @@ -945,12 +924,7 @@ void rw_lock_debug_mutex_exit(void) /*==========================*/ { - mutex_exit(&rw_lock_debug_mutex); - - if (rw_lock_debug_waiters) { - rw_lock_debug_waiters = FALSE; - os_event_set(rw_lock_debug_event); - } + os_fast_mutex_unlock(&rw_lock_debug_mutex); } /******************************************************************//** diff --git a/storage/xtradb/sync/sync0sync.cc b/storage/xtradb/sync/sync0sync.cc index e698b7dcf10..e078f223efc 100644 --- a/storage/xtradb/sync/sync0sync.cc +++ b/storage/xtradb/sync/sync0sync.cc @@ -1598,11 +1598,7 @@ sync_init(void) SYNC_NO_ORDER_CHECK); #ifdef UNIV_SYNC_DEBUG - mutex_create(rw_lock_debug_mutex_key, &rw_lock_debug_mutex, - SYNC_NO_ORDER_CHECK); - - rw_lock_debug_event = os_event_create(); - rw_lock_debug_waiters = FALSE; + os_fast_mutex_init(rw_lock_debug_mutex_key, &rw_lock_debug_mutex); #endif /* UNIV_SYNC_DEBUG */ } @@ -1676,6 +1672,7 @@ sync_close(void) sync_order_checks_on = FALSE; 
sync_thread_level_arrays_free(); + os_fast_mutex_free(&rw_lock_debug_mutex); #endif /* UNIV_SYNC_DEBUG */ sync_initialized = FALSE; @@ -1690,12 +1687,12 @@ sync_print_wait_info( FILE* file) /*!< in: file where to print */ { fprintf(file, - "Mutex spin waits "UINT64PF", rounds "UINT64PF", " - "OS waits "UINT64PF"\n" - "RW-shared spins "UINT64PF", rounds "UINT64PF", " - "OS waits "UINT64PF"\n" - "RW-excl spins "UINT64PF", rounds "UINT64PF", " - "OS waits "UINT64PF"\n", + "Mutex spin waits " UINT64PF ", rounds " UINT64PF ", " + "OS waits " UINT64PF "\n" + "RW-shared spins " UINT64PF ", rounds " UINT64PF ", " + "OS waits " UINT64PF "\n" + "RW-excl spins " UINT64PF ", rounds " UINT64PF ", " + "OS waits " UINT64PF "\n", (ib_uint64_t) mutex_spin_wait_count, (ib_uint64_t) mutex_spin_round_count, (ib_uint64_t) mutex_os_wait_count, diff --git a/storage/xtradb/trx/trx0i_s.cc b/storage/xtradb/trx/trx0i_s.cc index f5d4a6c862f..794ee432ca4 100644 --- a/storage/xtradb/trx/trx0i_s.cc +++ b/storage/xtradb/trx/trx0i_s.cc @@ -1653,7 +1653,7 @@ trx_i_s_create_lock_id( } else { /* table lock */ res_len = ut_snprintf(lock_id, lock_id_size, - TRX_ID_FMT":"UINT64PF, + TRX_ID_FMT":" UINT64PF, row->lock_trx_id, row->lock_table_id); } diff --git a/storage/xtradb/trx/trx0trx.cc b/storage/xtradb/trx/trx0trx.cc index d26517637cd..fece132e84a 100644 --- a/storage/xtradb/trx/trx0trx.cc +++ b/storage/xtradb/trx/trx0trx.cc @@ -51,6 +51,9 @@ Created 3/26/1996 Heikki Tuuri #include<set> +extern "C" +int thd_deadlock_victim_preference(const MYSQL_THD thd1, const MYSQL_THD thd2); + /** Set of table_id */ typedef std::set<table_id_t> table_id_set; @@ -1723,6 +1726,38 @@ trx_assign_read_view( return(trx->read_view); } +/********************************************************************//** +Clones the read view from another transaction. 
All consistent reads within +the receiver transaction will get the same read view as the donor transaction +@return read view clone */ +UNIV_INTERN +read_view_t* +trx_clone_read_view( +/*================*/ + trx_t* trx, /*!< in: receiver transaction */ + trx_t* from_trx) /*!< in: donor transaction */ +{ + ut_ad(lock_mutex_own()); + ut_ad(mutex_own(&trx_sys->mutex)); + ut_ad(trx_mutex_own(from_trx)); + ut_ad(trx->read_view == NULL); + + if (from_trx->state != TRX_STATE_ACTIVE || + from_trx->read_view == NULL) { + + return(NULL); + } + + trx->read_view = read_view_clone(from_trx->read_view, + trx->prebuilt_view); + + read_view_add(trx->read_view); + + trx->global_read_view = trx->read_view; + + return(trx->read_view); +} + /****************************************************************//** Prepares a transaction for commit/rollback. */ UNIV_INTERN @@ -2069,7 +2104,7 @@ state_ok: if (trx->undo_no != 0) { newline = TRUE; - fprintf(f, ", undo log entries "TRX_ID_FMT, trx->undo_no); + fprintf(f, ", undo log entries " TRX_ID_FMT, trx->undo_no); } if (newline) { @@ -2172,9 +2207,8 @@ trx_assert_started( #endif /* UNIV_DEBUG */ /*******************************************************************//** -Compares the "weight" (or size) of two transactions. Transactions that -have edited non-transactional tables are considered heavier than ones -that have not. +Compares the "weight" (or size) of two transactions. The heavier the weight, +the more reluctant we will be to choose the transaction as a deadlock victim. @return TRUE if weight(a) >= weight(b) */ UNIV_INTERN ibool @@ -2183,26 +2217,19 @@ trx_weight_ge( const trx_t* a, /*!< in: the first transaction to be compared */ const trx_t* b) /*!< in: the second transaction to be compared */ { - ibool a_notrans_edit; - ibool b_notrans_edit; - - /* If mysql_thd is NULL for a transaction we assume that it has - not edited non-transactional tables. 
*/ - - a_notrans_edit = a->mysql_thd != NULL - && thd_has_edited_nontrans_tables(a->mysql_thd); - - b_notrans_edit = b->mysql_thd != NULL - && thd_has_edited_nontrans_tables(b->mysql_thd); - - if (a_notrans_edit != b_notrans_edit) { + int pref; - return(a_notrans_edit); + /* First ask the upper server layer if it has any preference for which + to prefer as a deadlock victim. */ + pref= thd_deadlock_victim_preference(a->mysql_thd, b->mysql_thd); + if (pref < 0) { + return FALSE; + } else if (pref > 0) { + return TRUE; } - /* Either both had edited non-transactional tables or both had - not, we fall back to comparing the number of altered/locked - rows. */ + /* Upper server layer had no preference, we fall back to comparing the + number of altered/locked rows. */ #if 0 fprintf(stderr, @@ -2369,7 +2396,7 @@ trx_recover_for_mysql( ut_print_timestamp(stderr); fprintf(stderr, " InnoDB: Transaction contains changes" - " to "TRX_ID_FMT" rows\n", + " to " TRX_ID_FMT " rows\n", trx->undo_no); count++; diff --git a/strings/CMakeLists.txt b/strings/CMakeLists.txt index e1cee8d9824..6291d107d90 100644 --- a/strings/CMakeLists.txt +++ b/strings/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2006, 2013, Oracle and/or its affiliates +# Copyright (c) 2006, 2014, Oracle and/or its affiliates # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -36,7 +36,6 @@ ADD_CONVENIENCE_LIBRARY(strings ${STRINGS_SOURCES}) ADD_EXECUTABLE(conf_to_src EXCLUDE_FROM_ALL conf_to_src.c) TARGET_LINK_LIBRARIES(conf_to_src strings) -INSTALL_DEBUG_SYMBOLS(strings) IF(MSVC) INSTALL_DEBUG_TARGET(strings DESTINATION ${INSTALL_LIBDIR}/debug) ENDIF() diff --git a/strings/ctype-bin.c b/strings/ctype-bin.c index 6e861f38ae4..4beb7047d00 100644 --- a/strings/ctype-bin.c +++ b/strings/ctype-bin.c @@ -1,5 +1,6 @@ /* Copyright (c) 2002-2007 MySQL AB & tommy@valley.ne.jp - Copyright (c) 2009-2011, Monty Program Ab + 
Copyright (c) 2002, 2014, Oracle and/or its affiliates. + Copyright (c) 2009, 2014, SkySQL Ab. This library is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public diff --git a/strings/ctype-eucjpms.c b/strings/ctype-eucjpms.c index d9033a234c4..20972fbe136 100644 --- a/strings/ctype-eucjpms.c +++ b/strings/ctype-eucjpms.c @@ -1,5 +1,6 @@ -/* Copyright (c) 2005, 2011, Oracle and/or its affiliates. - Copyright (c) 2009-2011, Monty Program Ab +/* Copyright (c) 2002 MySQL AB & tommy@valley.ne.jp + Copyright (c) 2002, 2014, Oracle and/or its affiliates. + Copyright (c) 2009, 2014, SkySQL Ab. This library is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public diff --git a/strings/ctype-ucs2.c b/strings/ctype-ucs2.c index a7f948ebe3a..d2f7d34f0aa 100644 --- a/strings/ctype-ucs2.c +++ b/strings/ctype-ucs2.c @@ -2202,10 +2202,10 @@ my_strnncollsp_utf32(CHARSET_INFO *cs, static uint my_ismbchar_utf32(CHARSET_INFO *cs __attribute__((unused)), - const char *b __attribute__((unused)), - const char *e __attribute__((unused))) + const char *b, + const char *e) { - return 4; + return b + 4 > e ? 0 : 4; } @@ -3159,10 +3159,10 @@ static int my_strnncollsp_ucs2(CHARSET_INFO *cs __attribute__((unused)), static uint my_ismbchar_ucs2(CHARSET_INFO *cs __attribute__((unused)), - const char *b __attribute__((unused)), - const char *e __attribute__((unused))) + const char *b, + const char *e) { - return 2; + return b + 2 > e ? 0 : 2; } diff --git a/strings/ctype-ujis.c b/strings/ctype-ujis.c index 9a69995b278..a6d9fb96fb1 100644 --- a/strings/ctype-ujis.c +++ b/strings/ctype-ujis.c @@ -1,6 +1,6 @@ -/* Copyright tommy@valley.ne.jp. - Copyright (c) 2002, 2011, Oracle and/or its affiliates. - Copyright (c) 2009-2011, Monty Program Ab +/* Copyright (c) 2002 MySQL AB & tommy@valley.ne.jp + Copyright (c) 2002, 2014, Oracle and/or its affiliates. + Copyright (c) 2009, 2014, SkySQL Ab. 
This library is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public diff --git a/support-files/CMakeLists.txt b/support-files/CMakeLists.txt index 6734f3c78fb..a66adc19b13 100644 --- a/support-files/CMakeLists.txt +++ b/support-files/CMakeLists.txt @@ -1,5 +1,5 @@ -# Copyright (c) 2006, 2010, Oracle and/or its affiliates. -# Copyright (c) 2012, Monty Program Ab +# Copyright (c) 2006, 2014, Oracle and/or its affiliates. +# Copyright (c) 2012, 2014, SkySQL Ab. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -56,12 +56,14 @@ IF(UNIX) IF(script MATCHES ".ini") SET(comp IniFiles) + SET(permissions OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ) ELSE() SET(comp Server_Scripts) + SET(permissions OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE) ENDIF() INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/${script} DESTINATION ${inst_location} COMPONENT ${comp} - PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE) + PERMISSIONS ${permissions}) ENDFOREACH() IF(INSTALL_SUPPORTFILESDIR) INSTALL(FILES magic DESTINATION ${inst_location} COMPONENT SupportFiles) diff --git a/support-files/mysql.server.sh b/support-files/mysql.server.sh index c12f4cf4a3a..025b9e8cf31 100644 --- a/support-files/mysql.server.sh +++ b/support-files/mysql.server.sh @@ -262,6 +262,11 @@ wait_for_ready () { if $bindir/mysqladmin ping >/dev/null 2>&1; then log_success_msg return 0 + elif kill -0 $! 
2>/dev/null ; then + : # mysqld_safe is still running + else + # mysqld_safe is no longer running, abort the wait loop + break fi if test -e $sst_progress_file && [ $startup_sleep -ne 10 ];then diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh index 034e0acd8b0..5d8e75e9302 100644 --- a/support-files/mysql.spec.sh +++ b/support-files/mysql.spec.sh @@ -30,7 +30,7 @@ %global mysqld_group mysql %global mysqldatadir /var/lib/mysql -%global release 1 +%global release 2 # @@ -538,6 +538,7 @@ mkdir debug # XXX: install_layout so we can't just set it based on INSTALL_LAYOUT=RPM ${CMAKE} ../%{src_dir} -DBUILD_CONFIG=mysql_release -DINSTALL_LAYOUT=RPM \ -DCMAKE_BUILD_TYPE=Debug \ + -DENABLE_DTRACE=OFF \ -DMYSQL_UNIX_ADDR="%{mysqldatadir}/mysql.sock" \ -DFEATURE_SET="%{feature_set}" \ -DCOMPILATION_COMMENT="%{compilation_comment_debug}" \ @@ -556,6 +557,7 @@ mkdir release # XXX: install_layout so we can't just set it based on INSTALL_LAYOUT=RPM ${CMAKE} ../%{src_dir} -DBUILD_CONFIG=mysql_release -DINSTALL_LAYOUT=RPM \ -DCMAKE_BUILD_TYPE=RelWithDebInfo \ + -DENABLE_DTRACE=OFF \ -DMYSQL_UNIX_ADDR="%{mysqldatadir}/mysql.sock" \ -DFEATURE_SET="%{feature_set}" \ -DCOMPILATION_COMMENT="%{compilation_comment_release}" \ @@ -1279,6 +1281,9 @@ echo "=====" >> $STATUS_HISTORY # merging BK trees) ############################################################################## %changelog +* Wed Jul 02 2014 Bjorn Munch <bjorn.munch@oracle.com> +- Disable dtrace unconditionally, breaks after we install Oracle dtrace + * Wed Oct 30 2013 Balasubramanian Kandasamy <balasubramanian.kandasamy@oracle.com> - Removed non gpl file docs/mysql.info from community packages diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c index 898d67c5058..78dd24fe6d4 100644 --- a/tests/mysql_client_test.c +++ b/tests/mysql_client_test.c @@ -18616,7 +18616,7 @@ static void test_bug56976() const char* query = "SELECT LENGTH(?)"; char *long_buffer; unsigned long i, packet_len = 256 * 
1024L; - unsigned long dos_len = 2 * 1024 * 1024L; + unsigned long dos_len = 35000000; DBUG_ENTER("test_bug56976"); myheader("test_bug56976"); @@ -19255,6 +19255,106 @@ static void test_mdev4326() myquery(rc); } + +/* + Check compressed protocol +*/ + +static void test_compressed_protocol() +{ + MYSQL *mysql_local; + char query[4096], *end; + int i; + myheader("test_compressed_protocol"); + + if (!(mysql_local= mysql_client_init(NULL))) + { + fprintf(stderr, "\n mysql_client_init() failed"); + exit(1); + } + + if (!(mysql_real_connect(mysql_local, opt_host, opt_user, + opt_password, current_db, opt_port, + opt_unix_socket, CLIENT_COMPRESS))) + { + fprintf(stderr, "\n connection failed(%s)", mysql_error(mysql_local)); + exit(1); + } + mysql_options(mysql_local,MYSQL_OPT_COMPRESS,NullS); + + end= strmov(strfill(strmov(query, "select length(\""),1000,'a'),"\")"); + + for (i=0 ; i < 2 ; i++) + { + MYSQL_RES *res; + + int rc= mysql_real_query(mysql, query, (int) (end-query)); + myquery(rc); + res= mysql_store_result(mysql); + DBUG_ASSERT(res != 0); + mysql_free_result(res); + } + + mysql_close(mysql_local); +} + +/* + Check big packets +*/ + +static void test_big_packet() +{ + MYSQL *mysql_local; + char *query, *end; + /* We run the tests with a server with max packet size of 3200000 */ + size_t big_packet= 31000000L; + int i; + MYSQL_PARAMETERS *mysql_params= mysql_get_parameters(); + long org_max_allowed_packet= *mysql_params->p_max_allowed_packet; + long opt_net_buffer_length= *mysql_params->p_net_buffer_length; + + myheader("test_big_packet"); + + query= (char*) my_malloc(big_packet+1024, MYF(MY_WME)); + DIE_UNLESS(query); + + if (!(mysql_local= mysql_client_init(NULL))) + { + fprintf(stderr, "\n mysql_client_init() failed"); + exit(1); + } + + if (!(mysql_real_connect(mysql_local, opt_host, opt_user, + opt_password, current_db, opt_port, + opt_unix_socket, 0))) + { + fprintf(stderr, "\n connection failed(%s)", mysql_error(mysql_local)); + exit(1); + } + + 
*mysql_params->p_max_allowed_packet= big_packet+1000; + *mysql_params->p_net_buffer_length= 8L*256L*256L; + + end= strmov(strfill(strmov(query, "select length(\""), big_packet,'a'),"\")"); + + for (i=0 ; i < 2 ; i++) + { + MYSQL_RES *res; + int rc= mysql_real_query(mysql, query, (int) (end-query)); + myquery(rc); + res= mysql_store_result(mysql); + DBUG_ASSERT(res != 0); + mysql_free_result(res); + } + + mysql_close(mysql_local); + my_free(query); + + *mysql_params->p_max_allowed_packet= org_max_allowed_packet; + *mysql_params->p_net_buffer_length = opt_net_buffer_length; +} + + static struct my_tests_st my_tests[]= { { "disable_query_logs", disable_query_logs }, { "test_view_sp_list_fields", test_view_sp_list_fields }, @@ -19526,6 +19626,8 @@ static struct my_tests_st my_tests[]= { { "test_bug13001491", test_bug13001491 }, { "test_mdev4326", test_mdev4326 }, { "test_ps_sp_out_params", test_ps_sp_out_params }, + { "test_compressed_protocol", test_compressed_protocol }, + { "test_big_packet", test_big_packet }, { 0, 0 } }; diff --git a/vio/CMakeLists.txt b/vio/CMakeLists.txt index 72059f1ec08..2fb82ef9dd2 100644 --- a/vio/CMakeLists.txt +++ b/vio/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved. 
# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -21,7 +21,6 @@ SET(VIO_SOURCES vio.c viosocket.c viossl.c viopipe.c vioshm.c viosslfactories.c) ADD_CONVENIENCE_LIBRARY(vio ${VIO_SOURCES}) TARGET_LINK_LIBRARIES(vio ${LIBSOCKET}) -INSTALL_DEBUG_SYMBOLS(vio) IF(MSVC) INSTALL_DEBUG_TARGET(vio DESTINATION ${INSTALL_LIBDIR}/debug) ENDIF() diff --git a/zlib/CMakeLists.txt b/zlib/CMakeLists.txt index 0be1f976b39..7668ce723b8 100644 --- a/zlib/CMakeLists.txt +++ b/zlib/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -23,7 +23,6 @@ SET(ZLIB_SOURCES adler32.c compress.c crc32.c crc32.h deflate.c deflate.h gzio. ADD_CONVENIENCE_LIBRARY(zlib ${ZLIB_SOURCES}) RESTRICT_SYMBOL_EXPORTS(zlib) -INSTALL_DEBUG_SYMBOLS(zlib) IF(MSVC) INSTALL_DEBUG_TARGET(zlib DESTINATION ${INSTALL_LIBDIR}/debug) ENDIF() |